author    Jun Ohtani <johtani@gmail.com>    2016-10-27 17:43:18 +0900
committer GitHub <noreply@github.com>       2016-10-27 17:43:18 +0900
commit    a66c76eb446fed55efa7ad12f97923c248ff96d4 (patch)
tree      8661f8187e47045bd9c8ef5ca32e85baf209f186
parent    c1a9833445114c3490fe09300a2e3ce0c37cc97a (diff)
parent    945fa499d223261fffc89c732f340ad112ae58d7 (diff)
Merge pull request #20704 from johtani/remove_request_params_in_analyze_api
Removing request parameters in _analyze API
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java       34
-rw-r--r--  core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java   3
-rw-r--r--  docs/plugins/analysis-icu.asciidoc                                                          15
-rw-r--r--  docs/plugins/analysis-kuromoji.asciidoc                                                     58
-rw-r--r--  docs/plugins/analysis-phonetic.asciidoc                                                      6
-rw-r--r--  docs/reference/indices/analyze.asciidoc                                                     15
-rw-r--r--  docs/reference/mapping/params/analyzer.asciidoc                                              6
-rw-r--r--  docs/reference/migration/migrate_6_0/rest.asciidoc                                           4
-rw-r--r--  plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml       26
-rw-r--r--  plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml  31
-rw-r--r--  plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml  5
-rw-r--r--  plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml  5
-rw-r--r--  plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml  5
-rw-r--r--  plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml  5
-rw-r--r--  plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml  10
-rw-r--r--  plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml  12
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json                     34
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml         23
18 files changed, 142 insertions, 155 deletions
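
In short, every `_analyze` call that previously carried its options in the query string now carries them in a JSON body. A hedged before/after sketch, taken directly from the analysis-icu docs change in this diff:

[source,js]
--------------------------------------------------
# before this commit: options in the query string (removed)
POST icu_sample/_analyze?analyzer=my_analyzer&text=Elasticsearch. Wow!

# after this commit: the same options as fields of the request body
GET icu_sample/_analyze
{
  "analyzer": "my_analyzer",
  "text": "Elasticsearch. Wow!"
}
--------------------------------------------------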
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
index 247df1a380..1390e9d771 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java
@@ -22,13 +22,11 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
@@ -67,42 +65,14 @@ public class RestAnalyzeAction extends BaseRestHandler {
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
- String[] texts = request.paramAsStringArrayOrEmptyIfAll("text");
-
AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index"));
- analyzeRequest.text(texts);
- analyzeRequest.analyzer(request.param("analyzer"));
- analyzeRequest.field(request.param("field"));
- final String tokenizer = request.param("tokenizer");
- if (tokenizer != null) {
- analyzeRequest.tokenizer(tokenizer);
- }
- for (String filter : request.paramAsStringArray("filter", Strings.EMPTY_ARRAY)) {
- analyzeRequest.addTokenFilter(filter);
- }
- for (String charFilter : request.paramAsStringArray("char_filter", Strings.EMPTY_ARRAY)) {
- analyzeRequest.addTokenFilter(charFilter);
- }
- analyzeRequest.explain(request.paramAsBoolean("explain", false));
- analyzeRequest.attributes(request.paramAsStringArray("attributes", analyzeRequest.attributes()));
- if (RestActions.hasBodyContent(request)) {
- XContentType type = RestActions.guessBodyContentType(request);
- if (type == null) {
- if (texts == null || texts.length == 0) {
- texts = new String[]{ RestActions.getRestContent(request).utf8ToString() };
- analyzeRequest.text(texts);
- }
- } else {
- // NOTE: if rest request with xcontent body has request parameters, the parameters does not override xcontent values
- buildFromContent(RestActions.getRestContent(request), analyzeRequest, parseFieldMatcher);
- }
- }
+ buildFromContent(RestActions.getRestContent(request), analyzeRequest, parseFieldMatcher);
return channel -> client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel));
}
- public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) {
+ static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) {
try (XContentParser parser = XContentHelper.createParser(content)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Malformed content, must start with an object");
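
With the parameter handling gone, `prepareRequest` feeds the raw request body straight into `buildFromContent`, so anything that is not a JSON object trips the `Malformed content, must start with an object` branch above. A minimal curl sketch of the new contract (the analyzer name and text are illustrative):

[source,js]
--------------------------------------------------
# plain-text bodies, previously accepted for backwards compatibility,
# are no longer parsed; the body must be a JSON object:
curl -XGET 'localhost:9200/_analyze' -d '
{
  "analyzer": "standard",
  "text": "this is a test"
}'
--------------------------------------------------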
diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
index 9b7d4073d0..385bfd17b1 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java
@@ -118,7 +118,7 @@ public class RestAnalyzeActionTests extends ESTestCase {
assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'"));
}
- public void testDeprecatedParamException() throws Exception {
+ public void testDeprecatedParamIn2xException() throws Exception {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> RestAnalyzeAction.buildFromContent(
XContentFactory.jsonBuilder()
@@ -165,5 +165,4 @@ public class RestAnalyzeActionTests extends ESTestCase {
, new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY)));
assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]"));
}
-
}
diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index d119d118a4..1a0f4b5e92 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -164,7 +164,11 @@ PUT icu_sample
}
}
-POST icu_sample/_analyze?analyzer=my_analyzer&text=Elasticsearch. Wow!
+GET icu_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "Elasticsearch. Wow!"
+}
--------------------------------------------------
// CONSOLE
@@ -480,18 +484,21 @@ PUT icu_sample
}
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "你好" <2>
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "здравствуйте" <3>
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "こんにちは" <4>
}
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc
index 56fa3f7ad5..69907b9812 100644
--- a/docs/plugins/analysis-kuromoji.asciidoc
+++ b/docs/plugins/analysis-kuromoji.asciidoc
@@ -175,7 +175,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=東京スカイツリー
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "東京スカイツリー"
+}
--------------------------------------------------
// CONSOLE
@@ -228,7 +232,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=飲み
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "飲み"
+}
--------------------------------------------------
// CONSOLE
@@ -290,7 +298,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=寿司がおいしいね
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "寿司がおいしいね"
+}
--------------------------------------------------
// CONSOLE
@@ -363,9 +375,17 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=katakana_analyzer&text=寿司 <1>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "katakana_analyzer",
+ "text": "寿司" <1>
+}
-POST kuromoji_sample/_analyze?analyzer=romaji_analyzer&text=寿司 <2>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "romaji_analyzer",
+ "text": "寿司" <2>
+}
--------------------------------------------------
// CONSOLE
@@ -413,9 +433,17 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=コピー <1>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "コピー" <1>
+}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "サーバー" <2>
+}
--------------------------------------------------
// CONSOLE
@@ -424,7 +452,7 @@ POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2>
[[analysis-kuromoji-stop]]
-===== `ja_stop` token filter
+==== `ja_stop` token filter
The `ja_stop` token filter filters out Japanese stopwords (`_japanese_`), and
any other custom stopwords specified by the user. This filter only supports
@@ -461,7 +489,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=analyzer_with_ja_stop&text=ストップは消える
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "analyzer_with_ja_stop",
+ "text": "ストップは消える"
+}
--------------------------------------------------
// CONSOLE
@@ -482,7 +514,7 @@ The above request returns:
// TESTRESPONSE
[[analysis-kuromoji-number]]
-===== `kuromoji_number` token filter
+==== `kuromoji_number` token filter
The `kuromoji_number` token filter normalizes Japanese numbers (kansūji)
to regular Arabic decimal numbers in half-width characters. For example:
@@ -507,7 +539,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=一〇〇〇
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "一〇〇〇"
+}
--------------------------------------------------
// CONSOLE
diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc
index 815cbcdb43..fb0f6832d4 100644
--- a/docs/plugins/analysis-phonetic.asciidoc
+++ b/docs/plugins/analysis-phonetic.asciidoc
@@ -82,7 +82,11 @@ PUT phonetic_sample
}
}
-POST phonetic_sample/_analyze?analyzer=my_analyzer&text=Joe Bloggs <1>
+GET phonetic_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "Joe Bloggs" <1>
+}
--------------------------------------------------
// CONSOLE
diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc
index dbb2c8f101..0d9d60d484 100644
--- a/docs/reference/indices/analyze.asciidoc
+++ b/docs/reference/indices/analyze.asciidoc
@@ -100,21 +100,6 @@ curl -XGET 'localhost:9200/test/_analyze' -d '
Will cause the analysis to happen based on the analyzer configured in the
mapping for `obj1.field1` (and if not, the default index analyzer).
-All parameters can also supplied as request parameters. For example:
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&text=this+is+a+test'
---------------------------------------------------
-
-For backwards compatibility, we also accept the text parameter as the body of the request,
-provided it doesn't start with `{` :
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&char_filter=html_strip' -d 'this is a <b>test</b>'
---------------------------------------------------
-
=== Explain Analyze
If you want to get more advanced details, set `explain` to `true` (defaults to `false`). It will output all token attributes for each token.
diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc
index c075b66280..0b60451e02 100644
--- a/docs/reference/mapping/params/analyzer.asciidoc
+++ b/docs/reference/mapping/params/analyzer.asciidoc
@@ -60,13 +60,15 @@ PUT /my_index
}
}
-GET my_index/_analyze?field=text <3>
+GET my_index/_analyze <3>
{
+ "field": "text",
"text": "The quick Brown Foxes."
}
-GET my_index/_analyze?field=text.english <4>
+GET my_index/_analyze <4>
{
+ "field": "text.english",
"text": "The quick Brown Foxes."
}
--------------------------------------------------
diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc
index a0ac594e3f..897b3a53e8 100644
--- a/docs/reference/migration/migrate_6_0/rest.asciidoc
+++ b/docs/reference/migration/migrate_6_0/rest.asciidoc
@@ -7,3 +7,7 @@ In previous versions of Elasticsearch, JSON documents were allowed to contain un
This feature was removed in the 5.x series, but a backwards-compatibility layer was added via the
system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer
has been removed in Elasticsearch 6.0.0.
+
+==== Analyze API changes
+
+The deprecated request parameters and plain text in the request body have been removed. Define parameters in the request body instead.
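
A hedged illustration of the migration, reusing the query-string example deleted from `docs/reference/indices/analyze.asciidoc` above:

[source,js]
--------------------------------------------------
# 5.x style, removed in 6.0:
curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&text=this+is+a+test'

# 6.0 equivalent: the same options as body fields
curl -XGET 'localhost:9200/_analyze' -d '
{
  "tokenizer": "keyword",
  "filter": ["lowercase"],
  "text": "this is a test"
}'
--------------------------------------------------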
diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml
index 64fbbcadf7..180f6c6f5b 100644
--- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml
+++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml
@@ -3,8 +3,9 @@
"Tokenizer":
- do:
indices.analyze:
- text: Foo Bar
- tokenizer: icu_tokenizer
+ body:
+ text: Foo Bar
+ tokenizer: icu_tokenizer
- length: { tokens: 2 }
- match: { tokens.0.token: Foo }
- match: { tokens.1.token: Bar }
@@ -12,26 +13,29 @@
"Normalization filter":
- do:
indices.analyze:
- filter: icu_normalizer
- text: Foo Bar Ruß
- tokenizer: keyword
+ body:
+ filter: [icu_normalizer]
+ text: Foo Bar Ruß
+ tokenizer: keyword
- length: { tokens: 1 }
- match: { tokens.0.token: foo bar russ }
---
"Normalization charfilter":
- do:
indices.analyze:
- char_filter: icu_normalizer
- text: Foo Bar Ruß
- tokenizer: keyword
+ body:
+ char_filter: [icu_normalizer]
+ text: Foo Bar Ruß
+ tokenizer: keyword
- length: { tokens: 1 }
- match: { tokens.0.token: foo bar russ }
---
"Folding filter":
- do:
indices.analyze:
- filter: icu_folding
- text: Foo Bar résumé
- tokenizer: keyword
+ body:
+ filter: [icu_folding]
+ text: Foo Bar résumé
+ tokenizer: keyword
- length: { tokens: 1 }
- match: { tokens.0.token: foo bar resume }
diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml
index 42df558567..1cca2b728e 100644
--- a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml
+++ b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml
@@ -4,8 +4,9 @@
"Analyzer":
- do:
indices.analyze:
- text: JR新宿駅の近くにビールを飲みに行こうか
- analyzer: kuromoji
+ body:
+ text: JR新宿駅の近くにビールを飲みに行こうか
+ analyzer: kuromoji
- length: { tokens: 7 }
- match: { tokens.0.token: jr }
- match: { tokens.1.token: 新宿 }
@@ -18,8 +19,9 @@
"Tokenizer":
- do:
indices.analyze:
- text: 関西国際空港
- tokenizer: kuromoji_tokenizer
+ body:
+ text: 関西国際空港
+ tokenizer: kuromoji_tokenizer
- length: { tokens: 4 }
- match: { tokens.0.token: 関西 }
- match: { tokens.1.token: 関西国際空港 }
@@ -29,26 +31,29 @@
"Baseform filter":
- do:
indices.analyze:
- text: 飲み
- tokenizer: kuromoji_tokenizer
- filter: kuromoji_baseform
+ body:
+ text: 飲み
+ tokenizer: kuromoji_tokenizer
+ filter: [kuromoji_baseform]
- length: { tokens: 1 }
- match: { tokens.0.token: 飲む }
---
"Reading filter":
- do:
indices.analyze:
- text: 寿司
- tokenizer: kuromoji_tokenizer
- filter: kuromoji_readingform
+ body:
+ text: 寿司
+ tokenizer: kuromoji_tokenizer
+ filter: [kuromoji_readingform]
- length: { tokens: 1 }
- match: { tokens.0.token: スシ }
---
"Stemming filter":
- do:
indices.analyze:
- text: サーバー
- tokenizer: kuromoji_tokenizer
- filter: kuromoji_stemmer
+ body:
+ text: サーバー
+ tokenizer: kuromoji_tokenizer
+ filter: [kuromoji_stemmer]
- length: { tokens: 1 }
- match: { tokens.0.token: サーバ }
diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml
index 02d4b315b6..1f326fe377 100644
--- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml
+++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml
@@ -22,8 +22,9 @@
- do:
indices.analyze:
index: phonetic_sample
- analyzer: my_analyzer
- text: Joe Bloggs
+ body:
+ analyzer: my_analyzer
+ text: Joe Bloggs
- length: { tokens: 4 }
- match: { tokens.0.token: J }
diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml
index 675847e557..5af9f48aa8 100644
--- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml
+++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml
@@ -22,8 +22,9 @@
- do:
indices.analyze:
index: phonetic_sample
- analyzer: my_analyzer
- text: supercalifragilisticexpialidocious
+ body:
+ analyzer: my_analyzer
+ text: supercalifragilisticexpialidocious
- length: { tokens: 1 }
- match: { tokens.0.token: SPRKLF }
diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml
index 015610af17..259b0adea7 100644
--- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml
+++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml
@@ -24,8 +24,9 @@
- do:
indices.analyze:
index: phonetic_sample
- analyzer: my_analyzer
- text: Szwarc
+ body:
+ analyzer: my_analyzer
+ text: Szwarc
- length: { tokens: 1 }
- match: { tokens.0.token: Svarts }
diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml
index 5125ae3d68..c67b6892bc 100644
--- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml
+++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml
@@ -21,8 +21,9 @@
- do:
indices.analyze:
index: phonetic_sample
- analyzer: my_analyzer
- text: Moskowitz
+ body:
+ analyzer: my_analyzer
+ text: Moskowitz
- length: { tokens: 1 }
- match: { tokens.0.token: "645740" }
diff --git a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml
index 2549f774f8..0f1b2805c9 100644
--- a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml
+++ b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml
@@ -3,8 +3,9 @@
"Tokenizer":
- do:
indices.analyze:
- text: 我购买了道具和服装。
- tokenizer: smartcn_tokenizer
+ body:
+ text: 我购买了道具和服装。
+ tokenizer: smartcn_tokenizer
- length: { tokens: 7 }
- match: { tokens.0.token: 我 }
- match: { tokens.1.token: 购买 }
@@ -17,8 +18,9 @@
"Analyzer":
- do:
indices.analyze:
- text: 我购买了道具和服装。
- analyzer: smartcn
+ body:
+ text: 我购买了道具和服装。
+ analyzer: smartcn
- length: { tokens: 6 }
- match: { tokens.0.token: 我 }
- match: { tokens.1.token: 购买 }
diff --git a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml
index f87f00b792..1941126c64 100644
--- a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml
+++ b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml
@@ -3,16 +3,18 @@
"Stemmer":
- do:
indices.analyze:
- text: studenci
- tokenizer: keyword
- filter: polish_stem
+ body:
+ text: studenci
+ tokenizer: keyword
+ filter: [polish_stem]
- length: { tokens: 1 }
- match: { tokens.0.token: student }
---
"Analyzer":
- do:
indices.analyze:
- text: studenta był
- analyzer: polish
+ body:
+ text: studenta był
+ analyzer: polish
- length: { tokens: 1 }
- match: { tokens.0.token: student }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
index 881382ffa0..9396538891 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
@@ -12,22 +12,6 @@
}
},
"params": {
- "analyzer": {
- "type" : "string",
- "description" : "The name of the analyzer to use"
- },
- "char_filter": {
- "type" : "list",
- "description" : "A comma-separated list of character filters to use for the analysis"
- },
- "field": {
- "type" : "string",
- "description" : "Use the analyzer configured for this field (instead of passing the analyzer name)"
- },
- "filter": {
- "type" : "list",
- "description" : "A comma-separated list of filters to use for the analysis"
- },
"index": {
"type" : "string",
"description" : "The name of the index to scope the operation"
@@ -36,22 +20,6 @@
"type" : "boolean",
"description" : "With `true`, specify that a local shard should be used if available, with `false`, use a random shard (default: true)"
},
- "text": {
- "type" : "list",
- "description" : "The text on which the analysis should be performed (when request body is not used)"
- },
- "tokenizer": {
- "type" : "string",
- "description" : "The name of the tokenizer to use for the analysis"
- },
- "explain": {
- "type" : "boolean",
- "description" : "With `true`, outputs more advanced details. (default: false)"
- },
- "attributes": {
- "type" : "list",
- "description" : "A comma-separated list of token attributes to output, this parameter works only with `explain=true`"
- },
"format": {
"type": "enum",
"options" : ["detailed","text"],
@@ -61,7 +29,7 @@
}
},
"body": {
- "description" : "The text on which the analysis should be performed"
+ "description" : "Define analyzer/tokenizer parameters and the text on which the analysis should be performed"
}
}
}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml
index 35d4a2b522..268cd78128 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml
@@ -8,7 +8,8 @@ setup:
"Basic test":
- do:
indices.analyze:
- text: Foo Bar
+ body:
+ text: Foo Bar
- length: { tokens: 2 }
- match: { tokens.0.token: foo }
- match: { tokens.1.token: bar }
@@ -17,9 +18,10 @@ setup:
"Tokenizer and filter":
- do:
indices.analyze:
- filter: lowercase
- text: Foo Bar
- tokenizer: keyword
+ body:
+ filter: [lowercase]
+ text: Foo Bar
+ tokenizer: keyword
- length: { tokens: 1 }
- match: { tokens.0.token: foo bar }
@@ -38,9 +40,10 @@ setup:
- do:
indices.analyze:
- field: text
index: test
- text: Foo Bar!
+ body:
+ field: text
+ text: Foo Bar!
- length: { tokens: 2 }
- match: { tokens.0.token: Foo }
- match: { tokens.1.token: Bar! }
@@ -52,14 +55,6 @@ setup:
- length: {tokens: 1 }
- match: { tokens.0.token: foo bar }
---
-"Body params override query string":
- - do:
- indices.analyze:
- text: Foo Bar
- body: { "text": "Bar Foo", "filter": ["lowercase"], "tokenizer": keyword }
- - length: {tokens: 1 }
- - match: { tokens.0.token: bar foo }
----
"Array text":
- do:
indices.analyze: