/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.pattern.PatternTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;

import java.util.regex.Pattern;
/** Simple regex-based analyzer based on PatternTokenizer + lowercase + stopwords */
public final class PatternAnalyzer extends Analyzer {

    private final Pattern pattern;
    private final boolean lowercase;
    private final CharArraySet stopWords;

    public PatternAnalyzer(Pattern pattern, boolean lowercase, CharArraySet stopWords) {
        this.pattern = pattern;
        this.lowercase = lowercase;
        this.stopWords = stopWords;
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        // group = -1 tells PatternTokenizer to use the pattern as a delimiter:
        // the input is split on matches rather than the matches becoming tokens.
        final Tokenizer tokenizer = new PatternTokenizer(pattern, -1);
        TokenStream stream = tokenizer;
        if (lowercase) {
            stream = new LowerCaseFilter(stream);
        }
        if (stopWords != null) {
            stream = new StopFilter(stream, stopWords);
        }
        return new TokenStreamComponents(tokenizer, stream);
    }
}
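
// What follows is a minimal usage sketch, not part of the original file: it shows
// how the analyzer above splits "Foo,Bar,Baz" on a comma pattern and lowercases
// the resulting tokens. The demo class name, field name, and sample input are
// illustrative assumptions, not anything defined by Elasticsearch itself.
class PatternAnalyzerUsageExample {

    public static void main(String[] args) throws java.io.IOException {
        // Split on "," with lowercasing enabled and no stopword filtering.
        PatternAnalyzer analyzer =
            new PatternAnalyzer(Pattern.compile(","), true, null);
        try (TokenStream stream = analyzer.tokenStream("field", "Foo,Bar,Baz")) {
            org.apache.lucene.analysis.tokenattributes.CharTermAttribute term =
                stream.addAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString()); // prints: foo, bar, baz
            }
            stream.end();
        }
    }
}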