author     Jun Ohtani <johtani@gmail.com>    2016-10-27 17:43:18 +0900
committer  GitHub <noreply@github.com>       2016-10-27 17:43:18 +0900
commit     a66c76eb446fed55efa7ad12f97923c248ff96d4 (patch)
tree       8661f8187e47045bd9c8ef5ca32e85baf209f186 /docs
parent     c1a9833445114c3490fe09300a2e3ce0c37cc97a (diff)
parent     945fa499d223261fffc89c732f340ad112ae58d7 (diff)
Merge pull request #20704 from johtani/remove_request_params_in_analyze_api
Removing request parameters in _analyze API
Diffstat (limited to 'docs')
-rw-r--r--  docs/plugins/analysis-icu.asciidoc                  | 15
-rw-r--r--  docs/plugins/analysis-kuromoji.asciidoc             | 58
-rw-r--r--  docs/plugins/analysis-phonetic.asciidoc             |  6
-rw-r--r--  docs/reference/indices/analyze.asciidoc             | 15
-rw-r--r--  docs/reference/mapping/params/analyzer.asciidoc     |  6
-rw-r--r--  docs/reference/migration/migrate_6_0/rest.asciidoc  |  4
6 files changed, 71 insertions(+), 33 deletions(-)
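In short, every example moves its parameters out of the URL query string and into a JSON request body, as the hunks below show. A minimal sketch of the new form, using the built-in `standard` analyzer purely for illustration:

[source,js]
--------------------------------------------------
GET _analyze
{
  "analyzer": "standard",
  "text": "Elasticsearch. Wow!"
}
--------------------------------------------------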
diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc
index d119d118a4..1a0f4b5e92 100644
--- a/docs/plugins/analysis-icu.asciidoc
+++ b/docs/plugins/analysis-icu.asciidoc
@@ -164,7 +164,11 @@ PUT icu_sample
}
}
-POST icu_sample/_analyze?analyzer=my_analyzer&text=Elasticsearch. Wow!
+GET icu_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "Elasticsearch. Wow!"
+}
--------------------------------------------------
// CONSOLE
@@ -480,18 +484,21 @@ PUT icu_sample
}
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "你好" <2>
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "здравствуйте" <3>
}
-GET icu_sample/_analyze?analyzer=latin
+GET icu_sample/_analyze
{
+ "analyzer": "latin",
"text": "こんにちは" <4>
}
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc
index 56fa3f7ad5..69907b9812 100644
--- a/docs/plugins/analysis-kuromoji.asciidoc
+++ b/docs/plugins/analysis-kuromoji.asciidoc
@@ -175,7 +175,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=東京スカイツリー
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "東京スカイツリー"
+}
--------------------------------------------------
// CONSOLE
@@ -228,7 +232,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=飲み
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "飲み"
+}
--------------------------------------------------
// CONSOLE
@@ -290,7 +298,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=寿司がおいしいね
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "寿司がおいしいね"
+}
--------------------------------------------------
// CONSOLE
@@ -363,9 +375,17 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=katakana_analyzer&text=寿司 <1>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "katakana_analyzer",
+ "text": "寿司" <1>
+}
-POST kuromoji_sample/_analyze?analyzer=romaji_analyzer&text=寿司 <2>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "romaji_analyzer",
+ "text": "寿司" <2>
+}
--------------------------------------------------
// CONSOLE
@@ -413,9 +433,17 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=コピー <1>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "コピー" <1>
+}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2>
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "サーバー" <2>
+}
--------------------------------------------------
// CONSOLE
@@ -424,7 +452,7 @@ POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2>
[[analysis-kuromoji-stop]]
-===== `ja_stop` token filter
+==== `ja_stop` token filter
The `ja_stop` token filter filters out Japanese stopwords (`_japanese_`), and
any other custom stopwords specified by the user. This filter only supports
@@ -461,7 +489,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=analyzer_with_ja_stop&text=ストップは消える
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "analyzer_with_ja_stop",
+ "text": "ストップは消える"
+}
--------------------------------------------------
// CONSOLE
@@ -482,7 +514,7 @@ The above request returns:
// TESTRESPONSE
[[analysis-kuromoji-number]]
-===== `kuromoji_number` token filter
+==== `kuromoji_number` token filter
The `kuromoji_number` token filter normalizes Japanese numbers (kansūji)
to regular Arabic decimal numbers in half-width characters. For example:
@@ -507,7 +539,11 @@ PUT kuromoji_sample
}
}
-POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=一〇〇〇
+GET kuromoji_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "一〇〇〇"
+}
--------------------------------------------------
// CONSOLE
diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc
index 815cbcdb43..fb0f6832d4 100644
--- a/docs/plugins/analysis-phonetic.asciidoc
+++ b/docs/plugins/analysis-phonetic.asciidoc
@@ -82,7 +82,11 @@ PUT phonetic_sample
}
}
-POST phonetic_sample/_analyze?analyzer=my_analyzer&text=Joe Bloggs <1>
+GET phonetic_sample/_analyze
+{
+ "analyzer": "my_analyzer",
+ "text": "Joe Bloggs" <1>
+}
--------------------------------------------------
// CONSOLE
diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc
index dbb2c8f101..0d9d60d484 100644
--- a/docs/reference/indices/analyze.asciidoc
+++ b/docs/reference/indices/analyze.asciidoc
@@ -100,21 +100,6 @@ curl -XGET 'localhost:9200/test/_analyze' -d '
Will cause the analysis to happen based on the analyzer configured in the
mapping for `obj1.field1` (and if not, the default index analyzer).
-All parameters can also supplied as request parameters. For example:
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&text=this+is+a+test'
---------------------------------------------------
-
-For backwards compatibility, we also accept the text parameter as the body of the request,
-provided it doesn't start with `{` :
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&char_filter=html_strip' -d 'this is a <b>test</b>'
---------------------------------------------------
-
=== Explain Analyze
If you want to get more advanced details, set `explain` to `true` (defaults to `false`). It will output all token attributes for each token.
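The `explain` option is likewise passed in the request body alongside the other parameters; a minimal sketch, assuming a `standard` tokenizer and a `lowercase` token filter:

[source,js]
--------------------------------------------------
GET _analyze
{
  "tokenizer": "standard",
  "filter": ["lowercase"],
  "text": "Quick Brown Foxes",
  "explain": true
}
--------------------------------------------------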
diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc
index c075b66280..0b60451e02 100644
--- a/docs/reference/mapping/params/analyzer.asciidoc
+++ b/docs/reference/mapping/params/analyzer.asciidoc
@@ -60,13 +60,15 @@ PUT /my_index
}
}
-GET my_index/_analyze?field=text <3>
+GET my_index/_analyze <3>
{
+ "field": "text",
"text": "The quick Brown Foxes."
}
-GET my_index/_analyze?field=text.english <4>
+GET my_index/_analyze <4>
{
+ "field": "text.english",
"text": "The quick Brown Foxes."
}
--------------------------------------------------
diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc
index a0ac594e3f..897b3a53e8 100644
--- a/docs/reference/migration/migrate_6_0/rest.asciidoc
+++ b/docs/reference/migration/migrate_6_0/rest.asciidoc
@@ -7,3 +7,7 @@ In previous versions of Elasticsearch, JSON documents were allowed to contain un
This feature was removed in the 5.x series, but a backwards-compatibility layer was added via the
system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer
has been removed in Elasticsearch 6.0.0.
+
+==== Analyze API changes
+
+The deprecated request parameters and the plain text request body have been removed. Define the text and all other parameters in the JSON request body instead.
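For example, a request that previously relied on query-string parameters and a plain text body (such as the removed `tokenizer=keyword&filter=lowercase&char_filter=html_strip` example above) would now send everything as JSON; a minimal sketch:

[source,js]
--------------------------------------------------
GET _analyze
{
  "tokenizer": "keyword",
  "filter": ["lowercase"],
  "char_filter": ["html_strip"],
  "text": "this is a <b>test</b>"
}
--------------------------------------------------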