
AnalyzeTextRequest to AnalyzeTextOptions. #11783

Merged · 1 commit · Jun 4, 2020

This pull request renames the public model AnalyzeTextRequest to AnalyzeTextOptions and renames the SearchableFieldProperty annotation members analyzer, searchAnalyzer, and synonymMaps to analyzerName, searchAnalyzerName, and synonymMapNames. In the diff below, removed lines are prefixed with "-" and added lines with "+".
@@ -3,7 +3,7 @@

package com.azure.search.documents.implementation.converters;

-import com.azure.search.documents.indexes.models.AnalyzeTextRequest;
+import com.azure.search.documents.indexes.models.AnalyzeTextOptions;
import com.azure.search.documents.indexes.models.CharFilterName;
import com.azure.search.documents.indexes.models.LexicalAnalyzerName;
import com.azure.search.documents.indexes.models.LexicalTokenizerName;
@@ -14,49 +14,49 @@

/**
* A converter between {@link com.azure.search.documents.indexes.implementation.models.AnalyzeRequest} and
- * {@link AnalyzeTextRequest}.
+ * {@link AnalyzeTextOptions}.
*/
public final class AnalyzeRequestConverter {
/**
- * Maps from {@link com.azure.search.documents.indexes.implementation.models.AnalyzeRequest} to {@link AnalyzeTextRequest}.
+ * Maps from {@link com.azure.search.documents.indexes.implementation.models.AnalyzeRequest} to {@link AnalyzeTextOptions}.
*/
-public static AnalyzeTextRequest map(com.azure.search.documents.indexes.implementation.models.AnalyzeRequest obj) {
+public static AnalyzeTextOptions map(com.azure.search.documents.indexes.implementation.models.AnalyzeRequest obj) {
if (obj == null) {
return null;
}
-AnalyzeTextRequest analyzeTextRequest = new AnalyzeTextRequest();
+AnalyzeTextOptions analyzeTextOptions = new AnalyzeTextOptions();

if (obj.getCharFilters() != null) {
List<CharFilterName> charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
-analyzeTextRequest.setCharFilters(charFilters);
+analyzeTextOptions.setCharFilters(charFilters);
}

if (obj.getAnalyzer() != null) {
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
-analyzeTextRequest.setAnalyzer(analyzer);
+analyzeTextOptions.setAnalyzer(analyzer);
}

if (obj.getTokenFilters() != null) {
List<TokenFilterName> tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
-analyzeTextRequest.setTokenFilters(tokenFilters);
+analyzeTextOptions.setTokenFilters(tokenFilters);
}

String text = obj.getText();
-analyzeTextRequest.setText(text);
+analyzeTextOptions.setText(text);

if (obj.getTokenizer() != null) {
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
-analyzeTextRequest.setTokenizer(tokenizer);
+analyzeTextOptions.setTokenizer(tokenizer);
}
-return analyzeTextRequest;
+return analyzeTextOptions;
}

/**
- * Maps from {@link AnalyzeTextRequest} to {@link com.azure.search.documents.indexes.implementation.models.AnalyzeRequest}.
+ * Maps from {@link AnalyzeTextOptions} to {@link com.azure.search.documents.indexes.implementation.models.AnalyzeRequest}.
*/
-public static com.azure.search.documents.indexes.implementation.models.AnalyzeRequest map(AnalyzeTextRequest obj) {
+public static com.azure.search.documents.indexes.implementation.models.AnalyzeRequest map(AnalyzeTextOptions obj) {
if (obj == null) {
return null;
}
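For orientation, a minimal sketch of what this converter now round-trips, assuming test-scope access to the internal AnalyzeRequest model (the text and analyzer values are illustrative, not part of this PR):

    AnalyzeTextOptions options = new AnalyzeTextOptions()
        .setText("One two")
        .setAnalyzer(LexicalAnalyzerName.WHITESPACE);

    // Public options model -> internal wire model
    com.azure.search.documents.indexes.implementation.models.AnalyzeRequest wire =
        AnalyzeRequestConverter.map(options);

    // Internal wire model -> public options model (round trip)
    AnalyzeTextOptions roundTripped = AnalyzeRequestConverter.map(wire);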
@@ -192,27 +192,27 @@ private static SearchField enrichWithAnnotation(SearchField searchField, java.la
.setFacetable(searchableFieldPropertyAnnotation.isFacetable())
.setKey(searchableFieldPropertyAnnotation.isKey())
.setHidden(searchableFieldPropertyAnnotation.isHidden());
-String analyzer = searchableFieldPropertyAnnotation.analyzer();
-String searchAnalyzer = searchableFieldPropertyAnnotation.searchAnalyzer();
+String analyzer = searchableFieldPropertyAnnotation.analyzerName();
+String searchAnalyzer = searchableFieldPropertyAnnotation.searchAnalyzerName();
String indexAnalyzer = searchableFieldPropertyAnnotation.indexAnalyzer();
if (!analyzer.isEmpty() && (!searchAnalyzer.isEmpty() || !indexAnalyzer.isEmpty())) {
throw logger.logExceptionAsError(new RuntimeException(
"Please specify either analyzer or both searchAnalyzer and indexAnalyzer."));
}
-if (!searchableFieldPropertyAnnotation.analyzer().isEmpty()) {
+if (!searchableFieldPropertyAnnotation.analyzerName().isEmpty()) {
searchField.setAnalyzerName(LexicalAnalyzerName.fromString(
-searchableFieldPropertyAnnotation.analyzer()));
+searchableFieldPropertyAnnotation.analyzerName()));
}
-if (!searchableFieldPropertyAnnotation.searchAnalyzer().isEmpty()) {
+if (!searchableFieldPropertyAnnotation.searchAnalyzerName().isEmpty()) {
searchField.setAnalyzerName(LexicalAnalyzerName.fromString(
-searchableFieldPropertyAnnotation.searchAnalyzer()));
+searchableFieldPropertyAnnotation.searchAnalyzerName()));
}
if (!searchableFieldPropertyAnnotation.indexAnalyzer().isEmpty()) {
searchField.setAnalyzerName(LexicalAnalyzerName.fromString(
searchableFieldPropertyAnnotation.indexAnalyzer()));
}
-if (searchableFieldPropertyAnnotation.synonymMaps().length != 0) {
-List<String> synonymMaps = Arrays.stream(searchableFieldPropertyAnnotation.synonymMaps())
+if (searchableFieldPropertyAnnotation.synonymMapNames().length != 0) {
+List<String> synonymMaps = Arrays.stream(searchableFieldPropertyAnnotation.synonymMapNames())
.filter(synonym -> !synonym.trim().isEmpty()).collect(Collectors.toList());
searchField.setSynonymMapNames(synonymMaps);
}
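A hedged sketch of how the renamed annotation members interact with the validation above (the BlogPost class and analyzer names are hypothetical, not part of this PR):

    public class BlogPost {
        // Either analyzerName alone...
        @SearchableFieldProperty(analyzerName = "en.lucene")
        private String title;

        // ...or searchAnalyzerName paired with indexAnalyzer; combining analyzerName
        // with either of those two throws the RuntimeException shown above.
        @SearchableFieldProperty(searchAnalyzerName = "whitespace", indexAnalyzer = "whitespace")
        private String body;
    }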
@@ -25,7 +25,7 @@
import com.azure.search.documents.indexes.implementation.SearchServiceRestClientImpl;
import com.azure.search.documents.indexes.implementation.models.ListIndexesResult;
import com.azure.search.documents.indexes.implementation.models.ListSynonymMapsResult;
-import com.azure.search.documents.indexes.models.AnalyzeTextRequest;
+import com.azure.search.documents.indexes.models.AnalyzeTextOptions;
import com.azure.search.documents.indexes.models.AnalyzedTokenInfo;
import com.azure.search.documents.indexes.models.SearchIndexStatistics;
import com.azure.search.documents.indexes.models.SearchIndex;
@@ -408,49 +408,49 @@ Mono<Response<Void>> deleteIndexWithResponse(String indexName, String etag, Requ
* Shows how an analyzer breaks text into tokens.
*
* @param indexName the name of the index for which to test an analyzer
- * @param analyzeTextRequest the text and analyzer or analysis components to test
+ * @param analyzeTextOptions the text and analyzer or analysis components to test
* @return analyze result.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
-public PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextRequest analyzeTextRequest) {
-    return analyzeText(indexName, analyzeTextRequest, null);
+public PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextOptions analyzeTextOptions) {
+    return analyzeText(indexName, analyzeTextOptions, null);
}

/**
* Shows how an analyzer breaks text into tokens.
*
* @param indexName the name of the index for which to test an analyzer
- * @param analyzeTextRequest the text and analyzer or analysis components to test
+ * @param analyzeTextOptions the text and analyzer or analysis components to test
* @param requestOptions additional parameters for the operation. Contains the tracking ID sent with the request to
* help with debugging
* @return a response containing analyze result.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
-public PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextRequest analyzeTextRequest,
+public PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextOptions analyzeTextOptions,
RequestOptions requestOptions) {
try {
return new PagedFlux<>(() ->
-withContext(context -> analyzeTextWithResponse(indexName, analyzeTextRequest, requestOptions,
+withContext(context -> analyzeTextWithResponse(indexName, analyzeTextOptions, requestOptions,
context)));
} catch (RuntimeException ex) {
return pagedFluxError(logger, ex);
}
}

-PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextRequest analyzeTextRequest,
+PagedFlux<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextOptions analyzeTextOptions,
RequestOptions requestOptions, Context context) {
try {
-return new PagedFlux<>(() -> analyzeTextWithResponse(indexName, analyzeTextRequest, requestOptions,
+return new PagedFlux<>(() -> analyzeTextWithResponse(indexName, analyzeTextOptions, requestOptions,
context));
} catch (RuntimeException ex) {
return pagedFluxError(logger, ex);
}
}

private Mono<PagedResponse<AnalyzedTokenInfo>> analyzeTextWithResponse(String indexName,
-AnalyzeTextRequest analyzeTextRequest, RequestOptions requestOptions, Context context) {
+AnalyzeTextOptions analyzeTextOptions, RequestOptions requestOptions, Context context) {
return restClient.indexes()
-.analyzeWithRestResponseAsync(indexName, AnalyzeRequestConverter.map(analyzeTextRequest),
+.analyzeWithRestResponseAsync(indexName, AnalyzeRequestConverter.map(analyzeTextOptions),
RequestOptionsIndexesConverter.map(requestOptions), context)
.onErrorMap(MappingUtils::exceptionMapper)
.map(MappingUtils::mappingTokenInfo);
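A minimal usage sketch for the renamed async overloads, assuming a configured SearchIndexAsyncClient named asyncClient and an existing index named "hotels" (both illustrative):

    AnalyzeTextOptions options = new AnalyzeTextOptions()
        .setText("One two")
        .setAnalyzer(LexicalAnalyzerName.WHITESPACE);

    // Emits one AnalyzedTokenInfo per token: "One" and "two"
    asyncClient.analyzeText("hotels", options)
        .subscribe(tokenInfo -> System.out.println(tokenInfo.getToken()));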
@@ -10,7 +10,7 @@
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.search.documents.SearchClient;
-import com.azure.search.documents.indexes.models.AnalyzeTextRequest;
+import com.azure.search.documents.indexes.models.AnalyzeTextOptions;
import com.azure.search.documents.indexes.models.AnalyzedTokenInfo;
import com.azure.search.documents.indexes.models.SearchIndexStatistics;
import com.azure.search.documents.indexes.models.SearchIndex;
@@ -247,28 +247,28 @@ public Response<Void> deleteIndexWithResponse(SearchIndex index, boolean onlyIfU
* Shows how an analyzer breaks text into tokens.
*
* @param indexName the name of the index for which to test an analyzer
- * @param analyzeTextRequest the text and analyzer or analysis components to test
+ * @param analyzeTextOptions the text and analyzer or analysis components to test
* @return analyze result.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
-public PagedIterable<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextRequest analyzeTextRequest) {
-    return analyzeText(indexName, analyzeTextRequest, null, Context.NONE);
+public PagedIterable<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextOptions analyzeTextOptions) {
+    return analyzeText(indexName, analyzeTextOptions, null, Context.NONE);
}

/**
* Shows how an analyzer breaks text into tokens.
*
* @param indexName the name of the index for which to test an analyzer
- * @param analyzeTextRequest the text and analyzer or analysis components to test
+ * @param analyzeTextOptions the text and analyzer or analysis components to test
* @param requestOptions additional parameters for the operation. Contains the tracking ID sent with the request to
* help with debugging
* @param context additional context that is passed through the HTTP pipeline during the service call
* @return analyze result.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
-public PagedIterable<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextRequest analyzeTextRequest,
+public PagedIterable<AnalyzedTokenInfo> analyzeText(String indexName, AnalyzeTextOptions analyzeTextOptions,
RequestOptions requestOptions, Context context) {
-return new PagedIterable<>(asyncClient.analyzeText(indexName, analyzeTextRequest, requestOptions, context));
+return new PagedIterable<>(asyncClient.analyzeText(indexName, analyzeTextOptions, requestOptions, context));
}
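The synchronous counterpart, assuming a configured SearchIndexClient named searchIndexClient and the same illustrative index name:

    AnalyzeTextOptions options = new AnalyzeTextOptions()
        .setText("One two")
        .setTokenizer(LexicalTokenizerName.WHITESPACE);

    // PagedIterable supports plain iteration over the analyzed tokens
    for (AnalyzedTokenInfo tokenInfo : searchIndexClient.analyzeText("hotels", options)) {
        System.out.println(tokenInfo.getToken());
    }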

@@ -57,14 +57,14 @@
*
* @return {@link LexicalAnalyzerName} String value. Or default to "null" String type.
*/
-String analyzer() default "";
+String analyzerName() default "";

/**
* Optional arguments defines the name of the search analyzer used for the field.
*
* @return {@link LexicalAnalyzerName} String value. Or default to an empty String.
*/
-String searchAnalyzer() default "";
+String searchAnalyzerName() default "";

/**
* Optional arguments defines the name of the analyzer used for the field.
@@ -83,5 +83,5 @@
*
* @return An array of synonym map values. Or default to empty string array.
*/
-String[] synonymMaps() default {};
+String[] synonymMapNames() default {};
}
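Taken together, a hedged sketch of a field using the renamed members (the field and synonym map name are hypothetical). searchAnalyzerName (formerly searchAnalyzer) is omitted here because it must be paired with indexAnalyzer rather than analyzerName:

    @SearchableFieldProperty(
        analyzerName = "en.lucene",            // was: analyzer
        synonymMapNames = {"hotel-synonyms"})  // was: synonymMaps
    private String description;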
@@ -12,7 +12,7 @@
* tokens.
*/
@Fluent
-public final class AnalyzeTextRequest {
+public final class AnalyzeTextOptions {
/*
* The text to break into tokens.
*/
@@ -88,7 +88,7 @@ public String getText() {
* @param text the text value to set.
* @return the AnalyzeRequest object itself.
*/
-public AnalyzeTextRequest setText(String text) {
+public AnalyzeTextOptions setText(String text) {
this.text = text;
return this;
}
@@ -154,7 +154,7 @@ public LexicalAnalyzerName getAnalyzer() {
* @param analyzer the analyzer value to set.
* @return the AnalyzeRequest object itself.
*/
-public AnalyzeTextRequest setAnalyzer(LexicalAnalyzerName analyzer) {
+public AnalyzeTextOptions setAnalyzer(LexicalAnalyzerName analyzer) {
this.analyzer = analyzer;
return this;
}
@@ -186,7 +186,7 @@ public LexicalTokenizerName getTokenizer() {
* @param tokenizer the tokenizer value to set.
* @return the AnalyzeRequest object itself.
*/
-public AnalyzeTextRequest setTokenizer(LexicalTokenizerName tokenizer) {
+public AnalyzeTextOptions setTokenizer(LexicalTokenizerName tokenizer) {
this.tokenizer = tokenizer;
return this;
}
@@ -210,7 +210,7 @@ public List<TokenFilterName> getTokenFilters() {
* @param tokenFilters the tokenFilters value to set.
* @return the AnalyzeRequest object itself.
*/
-public AnalyzeTextRequest setTokenFilters(List<TokenFilterName> tokenFilters) {
+public AnalyzeTextOptions setTokenFilters(List<TokenFilterName> tokenFilters) {
this.tokenFilters = tokenFilters;
return this;
}
@@ -234,7 +234,7 @@ public List<CharFilterName> getCharFilters() {
* @param charFilters the charFilters value to set.
* @return the AnalyzeRequest object itself.
*/
-public AnalyzeTextRequest setCharFilters(List<CharFilterName> charFilters) {
+public AnalyzeTextOptions setCharFilters(List<CharFilterName> charFilters) {
this.charFilters = charFilters;
return this;
}
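A minimal sketch of the renamed class's full fluent surface, mirroring the tests below (CharFilterName.HTML_STRIP is assumed to be the available char filter constant):

    AnalyzeTextOptions options = new AnalyzeTextOptions()
        .setText("One's <two/>")
        .setTokenizer(LexicalTokenizerName.WHITESPACE)
        .setTokenFilters(Collections.singletonList(TokenFilterName.APOSTROPHE))
        .setCharFilters(Collections.singletonList(CharFilterName.HTML_STRIP));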
@@ -7,7 +7,7 @@
import com.azure.search.documents.SearchClient;
import com.azure.search.documents.SearchDocument;
import com.azure.search.documents.SearchTestBase;
-import com.azure.search.documents.indexes.models.AnalyzeTextRequest;
+import com.azure.search.documents.indexes.models.AnalyzeTextOptions;
import com.azure.search.documents.indexes.models.AnalyzedTokenInfo;
import com.azure.search.documents.indexes.models.AsciiFoldingTokenFilter;
import com.azure.search.documents.indexes.models.CharFilter;
@@ -204,7 +204,7 @@ public void canAnalyze() {
searchIndexClient.createIndex(index);
indexesToCleanup.add(index.getName());

-AnalyzeTextRequest request = new AnalyzeTextRequest()
+AnalyzeTextOptions request = new AnalyzeTextOptions()
.setText("One two")
.setAnalyzer(LexicalAnalyzerName.WHITESPACE);
PagedIterable<AnalyzedTokenInfo> results = searchIndexClient.analyzeText(index.getName(), request);
@@ -213,7 +213,7 @@ public void canAnalyze() {
assertTokenInfoEqual("two", 4, 7, 1, iterator.next());
assertFalse(iterator.hasNext());

-request = new AnalyzeTextRequest()
+request = new AnalyzeTextOptions()
.setText("One's <two/>")
.setTokenizer(LexicalTokenizerName.WHITESPACE)
.setTokenFilters(Collections.singletonList(TokenFilterName.APOSTROPHE))
@@ -239,19 +239,19 @@ public void canAnalyzeWithAllPossibleNames() {

LexicalAnalyzerName.values()
.stream()
-.map(an -> new AnalyzeTextRequest()
+.map(an -> new AnalyzeTextOptions()
.setText("One two")
.setAnalyzer(an))
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));

LexicalTokenizerName.values()
.stream()
-.map(tn -> new AnalyzeTextRequest()
+.map(tn -> new AnalyzeTextOptions()
.setText("One two")
.setTokenizer(tn))
.forEach(r -> searchIndexClient.analyzeText(index.getName(), r));

-AnalyzeTextRequest request = new AnalyzeTextRequest()
+AnalyzeTextOptions request = new AnalyzeTextOptions()
.setText("One two")
.setTokenizer(LexicalTokenizerName.WHITESPACE)
.setTokenFilters(new ArrayList<>(TokenFilterName.values()))
@@ -17,7 +17,7 @@ public class Hotel {
@JsonProperty(value = "HotelId")
private String hotelId;

-@SearchableFieldProperty(isSortable = true, analyzer = "en.lucene")
+@SearchableFieldProperty(isSortable = true, analyzerName = "en.lucene")
@JsonProperty(value = "HotelName")
private String hotelName;

@@ -19,7 +19,7 @@ public class HotelAddress {
@JsonProperty(value = "StateProvince")
private String stateProvince;

@SearchableFieldProperty(synonymMaps = {"fieldbuilder"})
@SearchableFieldProperty(synonymMapNames = {"fieldbuilder"})
@JsonProperty(value = "Country")
private String country;

@@ -6,7 +6,7 @@
import com.azure.search.documents.indexes.SearchableFieldProperty;

public class HotelAnalyzerException {
@SearchableFieldProperty(analyzer = "en.microsoft", indexAnalyzer = "whitespce")
@SearchableFieldProperty(analyzerName = "en.microsoft", indexAnalyzer = "whitespce")
private String tag;

/**
@@ -11,7 +11,7 @@
* This is a class to test whether we filter out the empty String in synonymMaps.
*/
public class HotelWithEmptyInSynonymMaps {
@SearchableFieldProperty(synonymMaps = {"asynonymMaps", "", " ", "maps"})
@SearchableFieldProperty(synonymMapNames = {"asynonymMaps", "", " ", "maps"})
private List<String> tags;

/**