diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index 428ea497fe8b..2c7eec3cefbe 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 1.0.0-beta.5 (Unreleased) +## 1.0.0-beta.5 (2020-05-27) **New features** - Added Text property and `getText()` to `SentenceSentiment`. - `Warnings` property added to each document-level response object returned from the endpoints. It is a list of `TextAnalyticsWarning` objects. @@ -9,6 +9,8 @@ - Updated the Text Analytics SDK to target service version `v3.0`, up from `v3.0-preview.1`. **Breaking changes** +- Removed the pagination feature, which removes `TextAnalyticsPagedIterable`, `TextAnalyticsPagedFlux`, and `TextAnalyticsPagedResponse`. +- Removed the overload methods that take only a list of `String`; kept the max-overload API that takes a list of `String`, a language or country hint, and `TextAnalyticsRequestOptions`. - Renamed `apiKey()` to `credential()` on `TextAnalyticsClientBuilder`. - Removed `getGraphemeLength()` and `getGraphemeOffset()` from `CategorizedEntity`, `SentenceSentiment`, and `LinkedEntityMatch`. - `getGraphemeCount()` in `TextDocumentStatistics` has been renamed to `getCharacterCount()`. diff --git a/sdk/textanalytics/azure-ai-textanalytics/README.md b/sdk/textanalytics/azure-ai-textanalytics/README.md index 116121544709..f20b7324e8f5 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/README.md +++ b/sdk/textanalytics/azure-ai-textanalytics/README.md @@ -18,6 +18,7 @@ and includes six main functions: - [Cognitive Services or Text Analytics account][text_analytics_account] to use this package. ### Include the Package +**Note:** This version targets Azure Text Analytics service API version v3.0. [//]: # ({x-version-update-start;com.azure:azure-ai-textanalytics;current}) ```xml @@ -146,8 +147,8 @@ or the number of operation transactions that have gone through, simply call `getStatistics()` to get a `TextDocumentStatistics`, which contains both. ### Return value collection -An operation result collection, such as `TextAnalyticsPagedResponse`, which is the collection of -the result of a Text Analytics analyzing sentiment operation. For `TextAnalyticsPagedResponse` includes the model +An operation result collection, such as `AnalyzeSentimentResultCollection`, is the collection of +results from a Text Analytics analyze sentiment operation. It also includes the model version used for the operation and statistics of the batch documents.
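For example, iterating an `AnalyzeSentimentResultCollection` yields the per-document results alongside the batch-level metadata. A minimal sketch, not part of this diff: it assumes a built synchronous client named `textAnalyticsClient`, a hypothetical `List<String> documents`, and the max-overload `analyzeSentimentBatch` that takes documents, a language hint, and `TextAnalyticsRequestOptions`:

```java
// Sketch only: consume the batch-level metadata and the per-document results
// carried by an AnalyzeSentimentResultCollection.
AnalyzeSentimentResultCollection resultCollection = textAnalyticsClient.analyzeSentimentBatch(
    documents, "en", new TextAnalyticsRequestOptions().setIncludeStatistics(true));

System.out.printf("Model version: %s%n", resultCollection.getModelVersion());
System.out.printf("Transaction count: %s%n", resultCollection.getStatistics().getTransactionCount());

for (AnalyzeSentimentResult result : resultCollection) {
    if (result.isError()) {
        // Per-document failures are surfaced on the result rather than thrown.
        System.out.printf("Document %s failed: %s%n", result.getId(), result.getError().getMessage());
    } else {
        System.out.printf("Document %s sentiment: %s%n",
            result.getId(), result.getDocumentSentiment().getSentiment());
    }
}
```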
### Operation on multiple documents @@ -280,7 +281,7 @@ List documents = Arrays.asList( ); try { - textAnalyticsClient.detectLanguageBatch(documents, null, Context.NONE); + textAnalyticsClient.detectLanguageBatchWithResponse(documents, null, Context.NONE); } catch (HttpResponseException e) { System.out.println(e.getMessage()); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/AnalyzeSentimentAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/AnalyzeSentimentAsyncClient.java index ab8e384c532d..cc0ad672ecd8 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/AnalyzeSentimentAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/AnalyzeSentimentAsyncClient.java @@ -13,14 +13,16 @@ import com.azure.ai.textanalytics.implementation.models.SentimentResponse; import com.azure.ai.textanalytics.implementation.models.WarningCodeValue; import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextAnalyticsWarning; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.models.TextSentiment; +import com.azure.ai.textanalytics.models.WarningCode; import com.azure.core.exception.HttpResponseException; +import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; @@ -39,7 +41,7 @@ import static com.azure.ai.textanalytics.implementation.Utility.toMultiLanguageInput; import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsError; import static com.azure.ai.textanalytics.implementation.Utility.toTextDocumentStatistics; -import static com.azure.core.util.FluxUtil.fluxError; +import static com.azure.core.util.FluxUtil.monoError; import static com.azure.core.util.FluxUtil.withContext; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; @@ -61,55 +63,53 @@ class AnalyzeSentimentAsyncClient { } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link AnalyzeSentimentResult}. + * Helper function for calling service with max overloaded parameters that returns a mono {@link Response} + * which contains {@link AnalyzeSentimentResultCollection}. * * @param documents The list of documents to analyze sentiments for. * @param options The {@link TextAnalyticsRequestOptions} request options. * - * @return {@link TextAnalyticsPagedFlux} of {@link AnalyzeSentimentResult}. + * @return A mono {@link Response} contains {@link AnalyzeSentimentResultCollection}. 
*/ - TextAnalyticsPagedFlux analyzeSentimentBatch(Iterable documents, - TextAnalyticsRequestOptions options) { + public Mono> analyzeSentimentBatch( + Iterable documents, TextAnalyticsRequestOptions options) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> withContext(context -> - getAnalyzedSentimentResponseInPage(documents, options, context)).flux()); + return withContext(context -> getAnalyzedSentimentResponse(documents, options, context)); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link AnalyzeSentimentResult}. + * Helper function for calling service with max overloaded parameters that returns a mono {@link Response} + * which contains {@link AnalyzeSentimentResultCollection}. * * @param documents The list of documents to analyze sentiments for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return The {@link TextAnalyticsPagedFlux} of {@link AnalyzeSentimentResult}. + * @return A mono {@link Response} contains {@link AnalyzeSentimentResultCollection}. */ - TextAnalyticsPagedFlux analyzeSentimentBatchWithContext( + Mono> analyzeSentimentBatchWithContext( Iterable documents, TextAnalyticsRequestOptions options, Context context) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> - getAnalyzedSentimentResponseInPage(documents, options, context).flux()); + return getAnalyzedSentimentResponse(documents, options, context); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper method to convert the service response of {@link SentimentResponse} to {@link TextAnalyticsPagedResponse} - * of {@link AnalyzeSentimentResult}. + * Helper method to convert the service response of {@link SentimentResponse} to {@link Response} that contains + * {@link AnalyzeSentimentResultCollection}. * * @param response The {@link SimpleResponse} of {@link SentimentResponse} returned by the service. * - * @return The {@link TextAnalyticsPagedResponse} of {@link AnalyzeSentimentResult} returned by the SDK. + * @return A {@link Response} contains {@link AnalyzeSentimentResultCollection}. */ - private TextAnalyticsPagedResponse toTextAnalyticsPagedResponse( + private Response toAnalyzeSentimentResultCollectionResponse( SimpleResponse response) { final SentimentResponse sentimentResponse = response.getValue(); final List analyzeSentimentResults = new ArrayList<>(); @@ -130,11 +130,9 @@ private TextAnalyticsPagedResponse toTextAnalyticsPagedR analyzeSentimentResults.add(new AnalyzeSentimentResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } - return new TextAnalyticsPagedResponse<>( - response.getRequest(), response.getStatusCode(), response.getHeaders(), - analyzeSentimentResults, null, - sentimentResponse.getModelVersion(), - sentimentResponse.getStatistics() == null ? 
null : toBatchStatistics(sentimentResponse.getStatistics())); + return new SimpleResponse<>(response, + new AnalyzeSentimentResultCollection(analyzeSentimentResults, sentimentResponse.getModelVersion(), + sentimentResponse.getStatistics() == null ? null : toBatchStatistics(sentimentResponse.getStatistics()))); } /** @@ -154,7 +152,7 @@ private AnalyzeSentimentResult convertToAnalyzeSentimentResult(DocumentSentiment sentenceSentiment.getConfidenceScores(); final SentenceSentimentValue sentenceSentimentValue = sentenceSentiment.getSentiment(); return new SentenceSentiment(sentenceSentiment.getText(), - sentenceSentimentValue == null ? null : sentenceSentimentValue.toString(), + TextSentiment.fromString(sentenceSentimentValue == null ? null : sentenceSentimentValue.toString()), new SentimentConfidenceScores(confidenceScorePerSentence.getNegative(), confidenceScorePerSentence.getNeutral(), confidenceScorePerSentence.getPositive())); }).collect(Collectors.toList()); @@ -163,7 +161,8 @@ private AnalyzeSentimentResult convertToAnalyzeSentimentResult(DocumentSentiment final List warnings = documentSentiment.getWarnings().stream().map( warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); - return new TextAnalyticsWarning(warningCodeValue == null ? null : warningCodeValue.toString(), + return new TextAnalyticsWarning( + WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()); @@ -174,7 +173,7 @@ private AnalyzeSentimentResult convertToAnalyzeSentimentResult(DocumentSentiment ? null : toTextDocumentStatistics(documentSentiment.getStatistics()), null, new com.azure.ai.textanalytics.models.DocumentSentiment( - documentSentimentValue == null ? null : documentSentimentValue.toString(), + TextSentiment.fromString(documentSentimentValue == null ? null : documentSentimentValue.toString()), new SentimentConfidenceScores( confidenceScorePerLabel.getNegative(), confidenceScorePerLabel.getNeutral(), @@ -184,16 +183,16 @@ private AnalyzeSentimentResult convertToAnalyzeSentimentResult(DocumentSentiment } /** - * Call the service with REST response, convert to a {@link Mono} of {@link TextAnalyticsPagedResponse} of - * {@link AnalyzeSentimentResult} from a {@link SimpleResponse} of {@link SentimentResponse}. + * Call the service with REST response, convert to a {@link Mono} of {@link Response} which contains + * {@link AnalyzeSentimentResultCollection} from a {@link SimpleResponse} of {@link SentimentResponse}. * * @param documents A list of documents to be analyzed. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link Mono} of {@link TextAnalyticsPagedResponse} of {@link AnalyzeSentimentResult}. + * @return A mono {@link Response} contains {@link AnalyzeSentimentResultCollection}. 
*/ - private Mono> getAnalyzedSentimentResponseInPage( + private Mono> getAnalyzedSentimentResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { return service.sentimentWithResponseAsync( new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)), @@ -203,7 +202,7 @@ private Mono> getAnalyzedSent .doOnSubscribe(ignoredValue -> logger.info("A batch of documents - {}", documents.toString())) .doOnSuccess(response -> logger.info("Analyzed sentiment for a batch of documents - {}", response)) .doOnError(error -> logger.warning("Failed to analyze sentiment - {}", error)) - .map(this::toTextAnalyticsPagedResponse) + .map(this::toAnalyzeSentimentResultCollectionResponse) .onErrorMap(throwable -> mapToHttpResponseExceptionIfExist(throwable)); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/DetectLanguageAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/DetectLanguageAsyncClient.java index 97b6a21cb165..942844ba1ff2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/DetectLanguageAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/DetectLanguageAsyncClient.java @@ -11,12 +11,13 @@ import com.azure.ai.textanalytics.implementation.models.WarningCodeValue; import com.azure.ai.textanalytics.models.DetectLanguageInput; import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextAnalyticsWarning; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.models.WarningCode; import com.azure.core.exception.HttpResponseException; +import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; @@ -35,7 +36,7 @@ import static com.azure.ai.textanalytics.implementation.Utility.toLanguageInput; import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsError; import static com.azure.ai.textanalytics.implementation.Utility.toTextDocumentStatistics; -import static com.azure.core.util.FluxUtil.fluxError; +import static com.azure.core.util.FluxUtil.monoError; import static com.azure.core.util.FluxUtil.withContext; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; @@ -57,55 +58,51 @@ class DetectLanguageAsyncClient { } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link DetectLanguageResult}. + * Helper function for calling service with max overloaded parameters. * * @param documents The list of documents to detect languages for. * @param options The {@link TextAnalyticsRequestOptions} request options. * - * @return The {@link TextAnalyticsPagedFlux} of {@link DetectLanguageResult}. + * @return A mono {@link Response} that contains {@link DetectLanguageResultCollection}. 
*/ - TextAnalyticsPagedFlux detectLanguageBatch(Iterable documents, - TextAnalyticsRequestOptions options) { + Mono> detectLanguageBatch( + Iterable documents, TextAnalyticsRequestOptions options) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> withContext(context -> - getDetectedLanguageResponseInPage(documents, options, context)).flux()); + return withContext(context -> getDetectedLanguageResponse(documents, options, context)); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters with {@link Context} that a returns - * {@link TextAnalyticsPagedFlux} which is a paged flux that contains {@link DetectLanguageResult}. + * Helper function for calling service with max overloaded parameters with {@link Context}. * * @param documents The list of documents to detect languages for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return The {@link TextAnalyticsPagedFlux} of {@link DetectLanguageResult}. + * @return A mono {@link Response} which contains {@link DetectLanguageResultCollection}. */ - TextAnalyticsPagedFlux detectLanguageBatchWithContext( + Mono> detectLanguageBatchWithContext( Iterable documents, TextAnalyticsRequestOptions options, Context context) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> - getDetectedLanguageResponseInPage(documents, options, context).flux()); + return getDetectedLanguageResponse(documents, options, context); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper method to convert the service response of {@link LanguageResult} to {@link TextAnalyticsPagedResponse} - * of {@link DetectLanguageResult}. + * Helper method to convert the service response of {@link LanguageResult} to {@link Response} that contains + * {@link DetectLanguageResultCollection}. * * @param response the {@link SimpleResponse} of {@link LanguageResult} returned by the service. * - * @return the {@link TextAnalyticsPagedResponse} of {@link DetectLanguageResult} to be returned by the SDK. + * @return A {@link Response} that contains {@link DetectLanguageResultCollection}. */ - private TextAnalyticsPagedResponse toTextAnalyticsPagedResponse( + private Response toTextAnalyticsResultDocumentResponse( SimpleResponse response) { final LanguageResult languageResult = response.getValue(); final List detectLanguageResults = new ArrayList<>(); @@ -117,7 +114,8 @@ private TextAnalyticsPagedResponse toTextAnalyticsPagedRes final List warnings = documentLanguage.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); - return new TextAnalyticsWarning(warningCodeValue == null ? null : warningCodeValue.toString(), + return new TextAnalyticsWarning( + WarningCode.fromString(warningCodeValue == null ? 
null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()); @@ -147,27 +145,22 @@ private TextAnalyticsPagedResponse toTextAnalyticsPagedRes toTextAnalyticsError(documentError.getError()), null)); } - return new TextAnalyticsPagedResponse<>( - response.getRequest(), - response.getStatusCode(), - response.getHeaders(), - detectLanguageResults, - null, - languageResult.getModelVersion(), - languageResult.getStatistics() == null ? null : toBatchStatistics(languageResult.getStatistics())); + return new SimpleResponse<>(response, + new DetectLanguageResultCollection(detectLanguageResults, languageResult.getModelVersion(), + languageResult.getStatistics() == null ? null : toBatchStatistics(languageResult.getStatistics()))); } /** - * Call the service with REST response, convert to a {@link Mono} of {@link TextAnalyticsPagedResponse} of + * Call the service with REST response, convert to a {@link Mono} of {@link Response} of * {@link DetectLanguageResult} from a {@link SimpleResponse} of {@link LanguageResult}. * * @param documents The list of documents to detect languages for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link Mono} of {@link TextAnalyticsPagedResponse} of {@link DetectLanguageResult}. + * @return A mono {@link Response} that contains {@link DetectLanguageResultCollection}. */ - private Mono> getDetectedLanguageResponseInPage( + private Mono> getDetectedLanguageResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { return service.languagesWithResponseAsync( new LanguageBatchInput().setDocuments(toLanguageInput(documents)), @@ -178,8 +171,7 @@ private Mono> getDetectedLangua .doOnSuccess(response -> logger.info("Detected languages for a batch of documents - {}", response.getValue())) .doOnError(error -> logger.warning("Failed to detect language - {}", error)) - .map(this::toTextAnalyticsPagedResponse) + .map(this::toTextAnalyticsResultDocumentResponse) .onErrorMap(throwable -> mapToHttpResponseExceptionIfExist(throwable)); - } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/ExtractKeyPhraseAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/ExtractKeyPhraseAsyncClient.java index 0b95a130db97..28bc6dd54f59 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/ExtractKeyPhraseAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/ExtractKeyPhraseAsyncClient.java @@ -15,9 +15,10 @@ import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextAnalyticsWarning; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.models.WarningCode; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.core.exception.HttpResponseException; +import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; @@ -39,7 +40,6 @@ import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsError; import static 
com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsException; import static com.azure.ai.textanalytics.implementation.Utility.toTextDocumentStatistics; -import static com.azure.core.util.FluxUtil.fluxError; import static com.azure.core.util.FluxUtil.monoError; import static com.azure.core.util.FluxUtil.withContext; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; @@ -62,7 +62,7 @@ class ExtractKeyPhraseAsyncClient { } /** - * Helper function for calling service with max overloaded parameters that a returns {@link KeyPhrasesCollection}. + * Helper function for calling service with max overloaded parameters that returns a {@link KeyPhrasesCollection}. * * @param document A document. * @param language The language code. @@ -74,73 +74,75 @@ Mono extractKeyPhrasesSingleText(String document, String l Objects.requireNonNull(document, "'document' cannot be null."); final TextDocumentInput textDocumentInput = new TextDocumentInput("0", document); textDocumentInput.setLanguage(language); - return extractKeyPhrases(Collections.singletonList(textDocumentInput), null) - .map(keyPhraseResult -> { - if (keyPhraseResult.isError()) { - throw logger.logExceptionAsError(toTextAnalyticsException(keyPhraseResult.getError())); + return extractKeyPhrasesWithResponse(Collections.singletonList(textDocumentInput), null) + .map(resultCollectionResponse -> { + KeyPhrasesCollection keyPhrasesCollection = null; + // for each loop will have only one entry inside + for (ExtractKeyPhraseResult keyPhraseResult : resultCollectionResponse.getValue()) { + if (keyPhraseResult.isError()) { + throw logger.logExceptionAsError(toTextAnalyticsException(keyPhraseResult.getError())); + } + keyPhrasesCollection = new KeyPhrasesCollection(keyPhraseResult.getKeyPhrases(), + keyPhraseResult.getKeyPhrases().getWarnings()); } - return new KeyPhrasesCollection(keyPhraseResult.getKeyPhrases(), - keyPhraseResult.getKeyPhrases().getWarnings()); - }).last(); + return keyPhrasesCollection; + }); + } catch (RuntimeException ex) { return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link ExtractKeyPhraseResult}. + * Helper function for calling service with max overloaded parameters with {@link Response}. * * @param documents A list of documents to extract key phrases for. * @param options The {@link TextAnalyticsRequestOptions} request options. * - * @return The {@link TextAnalyticsPagedFlux} of {@link ExtractKeyPhraseResult}. + * @return A mono {@link Response} that contains {@link ExtractKeyPhrasesResultCollection}. 
*/ - TextAnalyticsPagedFlux extractKeyPhrases(Iterable documents, - TextAnalyticsRequestOptions options) { + Mono> extractKeyPhrasesWithResponse( + Iterable documents, TextAnalyticsRequestOptions options) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> withContext(context -> - getExtractedKeyPhrasesResponseInPage(documents, options, context)).flux()); + return withContext(context -> getExtractedKeyPhrasesResponse(documents, options, context)); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link ExtractKeyPhraseResult}. + * Helper function for calling service with max overloaded parameters that returns a {@link Response} + * which contains {@link ExtractKeyPhrasesResultCollection}. * * @param documents A list of documents to extract key phrases for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return The {@link TextAnalyticsPagedFlux} of {@link ExtractKeyPhraseResult}. + * @return A mono {@link Response} which contains {@link ExtractKeyPhrasesResultCollection}. */ - TextAnalyticsPagedFlux extractKeyPhrasesBatchWithContext( + Mono> extractKeyPhrasesBatchWithContext( Iterable documents, TextAnalyticsRequestOptions options, Context context) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> - getExtractedKeyPhrasesResponseInPage(documents, options, context).flux()); + return getExtractedKeyPhrasesResponse(documents, options, context); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper method to convert the service response of {@link KeyPhraseResult} to {@link TextAnalyticsPagedResponse} - * of {@link ExtractKeyPhraseResult}. + * Helper method to convert the service response of {@link KeyPhraseResult} to {@link Response} + * which contains {@link ExtractKeyPhrasesResultCollection}. * * @param response the {@link SimpleResponse} returned by the service. * - * @return the {@link TextAnalyticsPagedResponse} of {@link ExtractKeyPhraseResult} to be returned by the SDK. + * @return A {@link Response} which contains {@link ExtractKeyPhrasesResultCollection}. */ - private TextAnalyticsPagedResponse toTextAnalyticsPagedResponse( + private Response toExtractKeyPhrasesResultCollectionResponse( final SimpleResponse response) { - final KeyPhraseResult keyPhraseResult = response.getValue(); - + // List of documents results final List keyPhraseResultList = new ArrayList<>(); for (DocumentKeyPhrases documentKeyPhrases : keyPhraseResult.getDocuments()) { final String documentId = documentKeyPhrases.getId(); @@ -152,7 +154,8 @@ private TextAnalyticsPagedResponse toTextAnalyticsPagedR new IterableStream<>(documentKeyPhrases.getKeyPhrases()), new IterableStream<>(documentKeyPhrases.getWarnings().stream().map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); - return new TextAnalyticsWarning(warningCodeValue == null ? 
null : warningCodeValue.toString(), + return new TextAnalyticsWarning( + WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))))); } @@ -175,27 +178,23 @@ private TextAnalyticsPagedResponse toTextAnalyticsPagedR documentId, null, error, null)); } - return new TextAnalyticsPagedResponse<>( - response.getRequest(), - response.getStatusCode(), - response.getHeaders(), - keyPhraseResultList, - null, - keyPhraseResult.getModelVersion(), keyPhraseResult.getStatistics() == null ? null - : toBatchStatistics(keyPhraseResult.getStatistics())); + return new SimpleResponse<>(response, + new ExtractKeyPhrasesResultCollection(keyPhraseResultList, keyPhraseResult.getModelVersion(), + keyPhraseResult.getStatistics() == null ? null + : toBatchStatistics(keyPhraseResult.getStatistics()))); } /** - * Call the service with REST response, convert to a {@link Mono} of {@link TextAnalyticsPagedResponse} of - * {@link ExtractKeyPhraseResult} from a {@link SimpleResponse} of {@link KeyPhraseResult}. + * Call the service with REST response, convert to a {@link Mono} of {@link Response} which contains + * {@link ExtractKeyPhrasesResultCollection} from a {@link SimpleResponse} of {@link KeyPhraseResult}. * * @param documents A list of documents to extract key phrases for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link Mono} of {@link TextAnalyticsPagedResponse} of {@link ExtractKeyPhraseResult}. + * @return A mono {@link Response} that contains {@link ExtractKeyPhrasesResultCollection}. */ - private Mono> getExtractedKeyPhrasesResponseInPage( + private Mono> getExtractedKeyPhrasesResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { return service.keyPhrasesWithResponseAsync( new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)), @@ -205,7 +204,7 @@ private Mono> getExtractedKey .doOnSubscribe(ignoredValue -> logger.info("A batch of document - {}", documents.toString())) .doOnSuccess(response -> logger.info("A batch of key phrases output - {}", response.getValue())) .doOnError(error -> logger.warning("Failed to extract key phrases - {}", error)) - .map(this::toTextAnalyticsPagedResponse) + .map(this::toExtractKeyPhrasesResultCollectionResponse) .onErrorMap(throwable -> mapToHttpResponseExceptionIfExist(throwable)); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeEntityAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeEntityAsyncClient.java index 249f17ff28c5..30a026f24a6d 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeEntityAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeEntityAsyncClient.java @@ -9,13 +9,15 @@ import com.azure.ai.textanalytics.implementation.models.WarningCodeValue; import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.CategorizedEntityCollection; +import com.azure.ai.textanalytics.models.EntityCategory; import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import 
com.azure.ai.textanalytics.models.TextAnalyticsWarning; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.models.WarningCode; import com.azure.core.exception.HttpResponseException; +import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; @@ -37,7 +39,6 @@ import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsError; import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsException; import static com.azure.ai.textanalytics.implementation.Utility.toTextDocumentStatistics; -import static com.azure.core.util.FluxUtil.fluxError; import static com.azure.core.util.FluxUtil.monoError; import static com.azure.core.util.FluxUtil.withContext; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; @@ -60,8 +61,8 @@ class RecognizeEntityAsyncClient { } /** - * Helper function for calling service with max overloaded parameters that a returns {@link Mono} - * which is a paged flux that contains {@link CategorizedEntityCollection}. + * Helper function for calling service with max overloaded parameters that returns a {@link Mono} + * which contains {@link CategorizedEntityCollection}. * * @param document A single document. * @param language The language code. @@ -74,70 +75,69 @@ Mono recognizeEntities(String document, String lang final TextDocumentInput textDocumentInput = new TextDocumentInput("0", document); textDocumentInput.setLanguage(language); return recognizeEntitiesBatch(Collections.singletonList(textDocumentInput), null) - .map(entitiesResult -> { + .map(resultCollectionResponse -> { + CategorizedEntityCollection entityCollection = null; + // for each loop will have only one entry inside + for (RecognizeEntitiesResult entitiesResult : resultCollectionResponse.getValue()) { if (entitiesResult.isError()) { throw logger.logExceptionAsError(toTextAnalyticsException(entitiesResult.getError())); } - return new CategorizedEntityCollection(entitiesResult.getEntities(), + entityCollection = new CategorizedEntityCollection(entitiesResult.getEntities(), entitiesResult.getEntities().getWarnings()); - }).last(); + } + return entityCollection; + }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link RecognizeEntitiesResult}. + * Helper function for calling service with max overloaded parameters. * * @param documents The list of documents to recognize entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * - * @return The {@link TextAnalyticsPagedFlux} of {@link RecognizeEntitiesResult}. + * @return A mono {@link Response} that contains {@link RecognizeEntitiesResultCollection}. 
*/ - TextAnalyticsPagedFlux recognizeEntitiesBatch( + Mono> recognizeEntitiesBatch( Iterable documents, TextAnalyticsRequestOptions options) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> withContext(context -> - getRecognizedEntitiesResponseInPage(documents, options, context)).flux()); + return withContext(context -> getRecognizedEntitiesResponse(documents, options, context)); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link RecognizeEntitiesResult}. + * Helper function for calling service with max overloaded parameters with {@link Context} is given. * * @param documents The list of documents to recognize entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return the {@link TextAnalyticsPagedFlux} of {@link RecognizeEntitiesResult} to be returned by - * the SDK. + * @return A mono {@link Response} that contains {@link RecognizeEntitiesResultCollection}. */ - TextAnalyticsPagedFlux recognizeEntitiesBatchWithContext( + Mono> recognizeEntitiesBatchWithContext( Iterable documents, TextAnalyticsRequestOptions options, Context context) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> - getRecognizedEntitiesResponseInPage(documents, options, context).flux()); + return getRecognizedEntitiesResponse(documents, options, context); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper method to convert the service response of {@link EntitiesResult} to {@link TextAnalyticsPagedResponse}. - * of {@link RecognizeEntitiesResult}} + * Helper method to convert the service response of {@link EntitiesResult} to {@link Response} which contains + * {@link RecognizeEntitiesResultCollection}. * * @param response the {@link SimpleResponse} of {@link EntitiesResult} returned by the service. * - * @return the {@link TextAnalyticsPagedResponse} of {@link RecognizeEntitiesResult} to be returned - * by the SDK. + * @return A {@link Response} that contains {@link RecognizeEntitiesResultCollection}. */ - private TextAnalyticsPagedResponse toTextAnalyticsPagedResponse( + private Response toRecognizeEntitiesResultCollectionResponse( final SimpleResponse response) { EntitiesResult entitiesResult = response.getValue(); // List of documents results @@ -150,14 +150,15 @@ private TextAnalyticsPagedResponse toTextAnalyticsPaged null, new CategorizedEntityCollection( new IterableStream<>(documentEntities.getEntities().stream().map(entity -> - new CategorizedEntity(entity.getText(), entity.getCategory(), + new CategorizedEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore())) .collect(Collectors.toList())), new IterableStream<>(documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( - warningCodeValue == null ? 
null : warningCodeValue.toString(), warning.getMessage()); + WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), + warning.getMessage()); }).collect(Collectors.toList()))) ))); // Document errors @@ -178,23 +179,22 @@ private TextAnalyticsPagedResponse toTextAnalyticsPaged toTextAnalyticsError(documentError.getError()), null)); }); - return new TextAnalyticsPagedResponse<>( - response.getRequest(), response.getStatusCode(), response.getHeaders(), - recognizeEntitiesResults, null, entitiesResult.getModelVersion(), - entitiesResult.getStatistics() == null ? null : toBatchStatistics(entitiesResult.getStatistics())); + return new SimpleResponse<>(response, + new RecognizeEntitiesResultCollection(recognizeEntitiesResults, entitiesResult.getModelVersion(), + entitiesResult.getStatistics() == null ? null : toBatchStatistics(entitiesResult.getStatistics()))); } /** - * Call the service with REST response, convert to a {@link Mono} of {@link TextAnalyticsPagedResponse} of - * {@link RecognizeEntitiesResult} from a {@link SimpleResponse} of {@link EntitiesResult}. + * Call the service with REST response, convert to a {@link Mono} of {@link Response} that contains + * {@link RecognizeEntitiesResultCollection} from a {@link SimpleResponse} of {@link EntitiesResult}. * * @param documents The list of documents to recognize entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link Mono} of {@link TextAnalyticsPagedResponse} of {@link RecognizeEntitiesResult}. + * @return A mono {@link Response} that contains {@link RecognizeEntitiesResultCollection}. */ - private Mono> getRecognizedEntitiesResponseInPage( + private Mono> getRecognizedEntitiesResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { return service.entitiesRecognitionGeneralWithResponseAsync( new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)), @@ -205,7 +205,7 @@ private Mono> getRecognizedE .doOnSuccess(response -> logger.info("Recognized entities for a batch of documents- {}", response.getValue())) .doOnError(error -> logger.warning("Failed to recognize entities - {}", error)) - .map(this::toTextAnalyticsPagedResponse) + .map(this::toRecognizeEntitiesResultCollectionResponse) .onErrorMap(throwable -> mapToHttpResponseExceptionIfExist(throwable)); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeLinkedEntityAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeLinkedEntityAsyncClient.java index 508c7406490b..035762c54c6b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeLinkedEntityAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/RecognizeLinkedEntityAsyncClient.java @@ -14,9 +14,10 @@ import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextAnalyticsWarning; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.models.WarningCode; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import 
com.azure.core.exception.HttpResponseException; +import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; @@ -38,7 +39,6 @@ import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsError; import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsException; import static com.azure.ai.textanalytics.implementation.Utility.toTextDocumentStatistics; -import static com.azure.core.util.FluxUtil.fluxError; import static com.azure.core.util.FluxUtil.monoError; import static com.azure.core.util.FluxUtil.withContext; import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY; @@ -61,7 +61,7 @@ class RecognizeLinkedEntityAsyncClient { } /** - * Helper function for calling service with max overloaded parameters that a returns {@link LinkedEntityCollection}. + * Helper function for calling service with max overloaded parameters that returns a {@link LinkedEntityCollection}. * * @param document A single document. * @param language The language code. @@ -74,69 +74,72 @@ Mono recognizeLinkedEntities(String document, String lan final TextDocumentInput textDocumentInput = new TextDocumentInput("0", document); textDocumentInput.setLanguage(language); return recognizeLinkedEntitiesBatch(Collections.singletonList(textDocumentInput), null) - .map(entitiesResult -> { + .map(resultCollectionResponse -> { + LinkedEntityCollection linkedEntityCollection = null; + // for each loop will have only one entry inside + for (RecognizeLinkedEntitiesResult entitiesResult : resultCollectionResponse.getValue()) { if (entitiesResult.isError()) { throw logger.logExceptionAsError(toTextAnalyticsException(entitiesResult.getError())); } - return new LinkedEntityCollection(entitiesResult.getEntities(), + linkedEntityCollection = new LinkedEntityCollection(entitiesResult.getEntities(), entitiesResult.getEntities().getWarnings()); - }).last(); + } + return linkedEntityCollection; + }); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link RecognizeLinkedEntitiesResult}. + * Helper function for calling service with max overloaded parameters that returns a mono {@link Response} + * which contains {@link RecognizeLinkedEntitiesResultCollection}. * * @param documents The list of documents to recognize linked entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * - * @return The {@link TextAnalyticsPagedFlux} of {@link RecognizeLinkedEntitiesResult}. + * @return A mono {@link Response} that contains {@link RecognizeLinkedEntitiesResultCollection}. 
*/ - TextAnalyticsPagedFlux recognizeLinkedEntitiesBatch( + Mono> recognizeLinkedEntitiesBatch( Iterable documents, TextAnalyticsRequestOptions options) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> withContext(context -> - getRecognizedLinkedEntitiesResponseInPage(documents, options, context)).flux()); + return withContext(context -> getRecognizedLinkedEntitiesResponse(documents, options, context)); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** - * Helper function for calling service with max overloaded parameters that a returns {@link TextAnalyticsPagedFlux} - * which is a paged flux that contains {@link RecognizeLinkedEntitiesResult}. + * Helper function for calling service with max overloaded parameters that returns a mono {@link Response} + * which contains {@link RecognizeLinkedEntitiesResultCollection}. * * @param documents The list of documents to recognize linked entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return the {@link TextAnalyticsPagedFlux} of {@link RecognizeLinkedEntitiesResult} to be returned by the SDK. + * @return A mono {@link Response} that contains {@link RecognizeLinkedEntitiesResultCollection}. */ - TextAnalyticsPagedFlux recognizeLinkedEntitiesBatchWithContext( - Iterable documents, TextAnalyticsRequestOptions options, Context context) { + Mono> + recognizeLinkedEntitiesBatchWithContext(Iterable documents, + TextAnalyticsRequestOptions options, Context context) { try { inputDocumentsValidation(documents); - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> - getRecognizedLinkedEntitiesResponseInPage(documents, options, context).flux()); + return getRecognizedLinkedEntitiesResponse(documents, options, context); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } /** * Helper method to convert the service response of {@link EntityLinkingResult} to - * {@link TextAnalyticsPagedResponse} of {@link RecognizeLinkedEntitiesResult} + * {@link Response} which contains {@link RecognizeLinkedEntitiesResultCollection}. * * @param response the {@link SimpleResponse} of {@link EntityLinkingResult} returned by the service. * - * @return the {@link TextAnalyticsPagedResponse} of {@link RecognizeLinkedEntitiesResult} to be returned - * by the SDK. + * @return A {@link Response} that contains {@link RecognizeLinkedEntitiesResultCollection}. */ - private TextAnalyticsPagedResponse toTextAnalyticsPagedResponse( + private Response toRecognizeLinkedEntitiesResultCollectionResponse( final SimpleResponse response) { final EntityLinkingResult entityLinkingResult = response.getValue(); // List of documents results @@ -153,7 +156,8 @@ private TextAnalyticsPagedResponse toTextAnalytic .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( - warningCodeValue == null ? null : warningCodeValue.toString(), warning.getMessage()); + WarningCode.fromString(warningCodeValue == null ? 
null : warningCodeValue.toString()), + warning.getMessage()); }).collect(Collectors.toList()))) ))); // Document errors @@ -174,11 +178,10 @@ private TextAnalyticsPagedResponse toTextAnalytic toTextAnalyticsError(documentError.getError()), null)); }); - return new TextAnalyticsPagedResponse<>( - response.getRequest(), response.getStatusCode(), response.getHeaders(), - linkedEntitiesResults, null, entityLinkingResult.getModelVersion(), - entityLinkingResult.getStatistics() == null ? null - : toBatchStatistics(entityLinkingResult.getStatistics())); + return new SimpleResponse<>(response, + new RecognizeLinkedEntitiesResultCollection(linkedEntitiesResults, entityLinkingResult.getModelVersion(), + entityLinkingResult.getStatistics() == null ? null + : toBatchStatistics(entityLinkingResult.getStatistics()))); } private IterableStream mapLinkedEntity( @@ -197,16 +200,17 @@ private IterableStream mapLinkedEntity( } /** - * Call the service with REST response, convert to a {@link Mono} of {@link TextAnalyticsPagedResponse} of - * {@link RecognizeLinkedEntitiesResult} from a {@link SimpleResponse} of {@link EntityLinkingResult}. + * Call the service with REST response, convert to a {@link Mono} of {@link Response} which contains + * {@link RecognizeLinkedEntitiesResultCollection} from a {@link SimpleResponse} of {@link EntityLinkingResult}. * * @param documents The list of documents to recognize linked entities for. * @param options The {@link TextAnalyticsRequestOptions} request options. * @param context Additional context that is passed through the Http pipeline during the service call. - * @return A {@link Mono} of {@link TextAnalyticsPagedResponse} of {@link RecognizeLinkedEntitiesResult}. + * @return A mono {@link Response} that contains {@link RecognizeLinkedEntitiesResultCollection}. 
*/ - private Mono> getRecognizedLinkedEntitiesResponseInPage( - Iterable documents, TextAnalyticsRequestOptions options, Context context) { + private Mono> + getRecognizedLinkedEntitiesResponse(Iterable documents, TextAnalyticsRequestOptions options, + Context context) { return service.entitiesLinkingWithResponseAsync( new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)), context.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE), @@ -216,7 +220,7 @@ private Mono> getRecog .doOnSuccess(response -> logger.info("Recognized linked entities for a batch of documents - {}", response.getValue())) .doOnError(error -> logger.warning("Failed to recognize linked entities - {}", error)) - .map(this::toTextAnalyticsPagedResponse) + .map(this::toRecognizeLinkedEntitiesResultCollectionResponse) .onErrorMap(throwable -> mapToHttpResponseExceptionIfExist(throwable)); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java index 482eb6858f67..82d0b23e456a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClient.java @@ -5,27 +5,27 @@ import com.azure.ai.textanalytics.implementation.TextAnalyticsClientImpl; import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; -import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.CategorizedEntityCollection; import com.azure.ai.textanalytics.models.DetectLanguageInput; import com.azure.ai.textanalytics.models.DetectLanguageResult; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; -import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; import com.azure.ai.textanalytics.models.KeyPhrasesCollection; -import com.azure.ai.textanalytics.models.LinkedEntity; import com.azure.ai.textanalytics.models.LinkedEntityCollection; -import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; -import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; import com.azure.ai.textanalytics.models.TextAnalyticsError; import com.azure.ai.textanalytics.models.TextAnalyticsException; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceClient; import com.azure.core.annotation.ServiceMethod; import com.azure.core.http.rest.Response; +import com.azure.core.util.FluxUtil; import com.azure.core.util.logging.ClientLogger; import reactor.core.publisher.Mono; @@ -34,7 +34,6 @@ import static com.azure.ai.textanalytics.implementation.Utility.mapByIndex; import static com.azure.ai.textanalytics.implementation.Utility.toTextAnalyticsException; -import static 
com.azure.core.util.FluxUtil.fluxError; import static com.azure.core.util.FluxUtil.monoError; /** @@ -161,71 +160,23 @@ public Mono detectLanguage(String document, String countryHint try { Objects.requireNonNull(document, "'document' cannot be null."); return detectLanguageBatch(Collections.singletonList(document), countryHint, null) - .map(detectLanguageResult -> { - if (detectLanguageResult.isError()) { - throw logger.logExceptionAsError(toTextAnalyticsException(detectLanguageResult.getError())); + .map(detectLanguageResultCollection -> { + DetectedLanguage detectedLanguage = null; + for (DetectLanguageResult detectLanguageResult : detectLanguageResultCollection) { + if (detectLanguageResult.isError()) { + throw logger.logExceptionAsError(toTextAnalyticsException(detectLanguageResult.getError())); + } + detectedLanguage = detectLanguageResult.getPrimaryLanguage(); } - return detectLanguageResult.getPrimaryLanguage(); - }).last(); + // When the detected language result collection is empty, + // return empty result for the empty collection returned by the service. + return detectedLanguage; + }); } catch (RuntimeException ex) { return monoError(logger, ex); } } - /** - * Returns the detected language for each of documents. - * - * This method will use the default country hint that sets up in - * {@link TextAnalyticsClientBuilder#defaultCountryHint(String)}. If none is specified, service will use 'US' as - * the country hint. - * - *

<p><strong>Code sample</strong></p> - * <p>Detects language in a list of documents. Subscribes to the call asynchronously and prints out the - * detected language details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable} - * - * @param documents The list of documents to detect languages for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link DetectLanguageResult detected language document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux detectLanguageBatch(Iterable documents) { - return detectLanguageBatch(documents, defaultCountryHint, null); - } - - /** - * Returns the detected language for each of documents with the provided country hint. - * - *

<p><strong>Code sample</strong></p> - * <p>Detects language in a list of documents with a provided country hint for the batch. Subscribes to the - * call asynchronously and prints out the detected language details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String} - * - * @param documents The list of documents to detect languages for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param countryHint Accepts two letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not - * specified. To remove this behavior you can reset this parameter by setting this value to empty string - * {@code countryHint} = "" or "none". - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link DetectLanguageResult detected language document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux detectLanguageBatch( - Iterable documents, String countryHint) { - return detectLanguageBatch(documents, countryHint, null); - } - /** * Returns the detected language for each of documents with the provided country hint and request option. * @@ -245,13 +196,12 @@ public TextAnalyticsPagedFlux detectLanguageBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link DetectLanguageResult detected language document result}. + * @return A {@link Mono} contains a {@link DetectLanguageResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux detectLanguageBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono detectLanguageBatch( Iterable documents, String countryHint, TextAnalyticsRequestOptions options) { if (countryHint != null && countryHint.equalsIgnoreCase("none")) { @@ -259,11 +209,11 @@ public TextAnalyticsPagedFlux detectLanguageBatch( } final String finalCountryHint = countryHint; try { - return detectLanguageBatch( + return detectLanguageBatchWithResponse( mapByIndex(documents, (index, value) -> new DetectLanguageInput(index, value, finalCountryHint)), - options); + options).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } @@ -282,13 +232,12 @@ public TextAnalyticsPagedFlux detectLanguageBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link DetectLanguageResult detected language document result}. + * @return A {@link Mono} contains a {@link Response} which contains a {@link DetectLanguageResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux detectLanguageBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> detectLanguageBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options) { return detectLanguageAsyncClient.detectLanguageBatch(documents, options); } @@ -314,8 +263,7 @@ public TextAnalyticsPagedFlux detectLanguageBatch( * For text length limits, maximum batch size, and supported text encoding, see * data limits. 
* - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link CategorizedEntity recognized categorized entities}. + * @return A {@link Mono} contains a {@link CategorizedEntityCollection recognized categorized entities collection}. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -343,8 +291,7 @@ public Mono recognizeEntities(String document) { * @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as * default. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link CategorizedEntity recognized categorized entities}. + * @return A {@link Mono} contains a {@link CategorizedEntityCollection recognized categorized entities collection}. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -354,60 +301,6 @@ public Mono recognizeEntities(String document, Stri return recognizeEntityAsyncClient.recognizeEntities(document, language); } - /** - * Returns a list of general categorized entities for the provided list of documents. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
- * <p><b>Code sample</b></p>
- *
- * <p>Recognize entities in a document. Subscribes to the call asynchronously and prints out the entity details
- * when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable} - * - * @param documents A list of documents to recognize entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeEntitiesBatch( - Iterable documents) { - return recognizeEntitiesBatch(documents, defaultLanguage, null); - } - - /** - * Returns a list of general categorized entities for the provided list of documents with provided language code. - * - *
- * <p><b>Code sample</b></p>
- *
- * <p>Recognize entities in a document with the provided language code. Subscribes to the call asynchronously and
- * prints out the entity details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String} - * - * @param documents A list of documents to recognize entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as - * default. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeEntitiesBatch( - Iterable documents, String language) { - return recognizeEntitiesBatch(documents, language, null); - } - /** * Returns a list of general categorized entities for the provided list of documents with the provided language code * and request options. @@ -426,23 +319,22 @@ public TextAnalyticsPagedFlux recognizeEntitiesBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. + * @return A {@link Mono} contains a {@link RecognizeEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono recognizeEntitiesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { try { - return recognizeEntitiesBatch( + return recognizeEntitiesBatchWithResponse( mapByIndex(documents, (index, value) -> { final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value); textDocumentInput.setLanguage(language); return textDocumentInput; - }), options); + }), options).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } @@ -462,13 +354,12 @@ public TextAnalyticsPagedFlux recognizeEntitiesBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. + * @return A {@link Mono} contains a {@link Response} which contains a {@link RecognizeEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> recognizeEntitiesBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options) { return recognizeEntityAsyncClient.recognizeEntitiesBatch(documents, options); } @@ -492,7 +383,7 @@ public TextAnalyticsPagedFlux recognizeEntitiesBatch( * For text length limits, maximum batch size, and supported text encoding, see * data limits. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of {@link LinkedEntity recognized linked entities}. 
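The surviving max-overload pair now splits into a value-returning method and a `*WithResponse` variant. A minimal sketch of the latter, assuming the same `asyncClient`, invented inputs, and the usual accessors (`getEntities()` on the per-document result, `getText()`/`getCategory()` on `CategorizedEntity`), none of which appears in this hunk:

```java
List<TextDocumentInput> textDocumentInputs = Arrays.asList(
    new TextDocumentInput("0", "Satya Nadella is the CEO of Microsoft."),
    new TextDocumentInput("1", "Elon Musk is the CEO of SpaceX."));

asyncClient.recognizeEntitiesBatchWithResponse(textDocumentInputs, new TextAnalyticsRequestOptions())
    .subscribe(response -> {
        // Response<RecognizeEntitiesResultCollection>: HTTP details plus the collection value.
        System.out.println("Status code: " + response.getStatusCode());
        response.getValue().forEach(recognizeEntitiesResult ->
            recognizeEntitiesResult.getEntities().forEach(entity ->
                System.out.printf("Entity: %s, category: %s%n", entity.getText(), entity.getCategory())));
    });
```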
+ * @return A {@link Mono} contains a {@link LinkedEntityCollection recognized linked entities collection}. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -517,7 +408,7 @@ public Mono recognizeLinkedEntities(String document) { * @param language The 2 letter ISO 639-1 representation of language for the document. If not set, uses "en" for * English as default. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of {@link LinkedEntity recognized linked entities}. + * @return A {@link Mono} contains a {@link LinkedEntityCollection recognized linked entities collection}. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -527,62 +418,6 @@ public Mono recognizeLinkedEntities(String document, Str return recognizeLinkedEntityAsyncClient.recognizeLinkedEntities(document, language); } - /** - * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents. See - * this for supported languages in Text Analytics API. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
- * <p>Recognize linked entities in a list of documents. Subscribes to the call asynchronously and prints out the
- * entity details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable} - * - * @param documents A list of documents to recognize linked entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link LinkedEntity recognized linked entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeLinkedEntitiesBatch( - Iterable documents) { - return recognizeLinkedEntitiesBatch(documents, defaultLanguage, null); - } - - /** - * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents with - * provided language code. - * - * See this for supported languages in Text Analytics API. - * - *
- * <p>Recognize linked entities in a list of documents with provided language code. Subscribes to the call
- * asynchronously and prints out the entity details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String} - * - * @param documents A list of documents to recognize linked entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the text. If not set, uses "en" for - * English as default. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link LinkedEntity recognized linked entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeLinkedEntitiesBatch( - Iterable documents, String language) { - return recognizeLinkedEntitiesBatch(documents, language, null); - } - /** * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents with * provided language code and request options. @@ -602,22 +437,21 @@ public TextAnalyticsPagedFlux recognizeLinkedEnti * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link LinkedEntity recognized linked entities document result}. + * @return A {@link Mono} contains a {@link RecognizeLinkedEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeLinkedEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono recognizeLinkedEntitiesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { try { - return recognizeLinkedEntitiesBatch(mapByIndex(documents, (index, value) -> { + return recognizeLinkedEntitiesBatchWithResponse(mapByIndex(documents, (index, value) -> { final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value); textDocumentInput.setLanguage(language); return textDocumentInput; - }), options); + }), options).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } @@ -639,14 +473,15 @@ public TextAnalyticsPagedFlux recognizeLinkedEnti * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link LinkedEntity recognized linked entities document result}. + * @return A {@link Mono} contains a {@link Response} which contains a + * {@link RecognizeLinkedEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux recognizeLinkedEntitiesBatch( - Iterable documents, TextAnalyticsRequestOptions options) { + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> + recognizeLinkedEntitiesBatchWithResponse(Iterable documents, + TextAnalyticsRequestOptions options) { return recognizeLinkedEntityAsyncClient.recognizeLinkedEntitiesBatch(documents, options); } @@ -668,7 +503,7 @@ public TextAnalyticsPagedFlux recognizeLinkedEnti * For text length limits, maximum batch size, and supported text encoding, see * data limits. 
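The linked-entity path follows the same pattern; a sketch of the string-list overload (sample text invented, `getEntities()`, `getName()`, and `getUrl()` assumed from the existing models):

```java
List<String> documents = Arrays.asList(
    "Old Faithful is a geyser at Yellowstone Park.",
    "Mount Shasta has lenticular clouds.");

// A null TextAnalyticsRequestOptions falls back to service defaults.
asyncClient.recognizeLinkedEntitiesBatch(documents, "en", null)
    .subscribe(resultCollection ->
        resultCollection.forEach(result ->
            result.getEntities().forEach(linkedEntity ->
                System.out.printf("Entity: %s, URL: %s%n", linkedEntity.getName(), linkedEntity.getUrl()))));
```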
* - * @return A {@link TextAnalyticsPagedFlux} contains a list of extracted key phrases. + * @return A {@link Mono} contains a {@link KeyPhrasesCollection}. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -694,7 +529,7 @@ public Mono extractKeyPhrases(String document) { * @param language The 2 letter ISO 639-1 representation of language for the text. If not set, uses "en" for * English as default. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of extracted key phrases. + * @return A {@link Mono} contains a {@link KeyPhrasesCollection} * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -704,59 +539,6 @@ public Mono extractKeyPhrases(String document, String lang return extractKeyPhraseAsyncClient.extractKeyPhrasesSingleText(document, language); } - /** - * Returns a list of strings denoting the key phrases in the document. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
- * <p>Extract key phrases in a list of documents. Subscribes to the call asynchronously and prints out the
- * key phrases when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux extractKeyPhrasesBatch(Iterable documents) { - return extractKeyPhrasesBatch(documents, defaultLanguage, null); - } - - /** - * Returns a list of strings denoting the key phrases in the document with provided language code. - * - * See this for the list of enabled languages. - * - *
- * <p>Extract key phrases in a list of documents with a provided language code. Subscribes to the call
- * asynchronously and prints out the key phrases when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the text. If not set, uses "en" for - * English as default. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux extractKeyPhrasesBatch( - Iterable documents, String language) { - return extractKeyPhrasesBatch(documents, language, null); - } - /** * Returns a list of strings denoting the key phrases in the document with provided language code and request * options. @@ -776,23 +558,22 @@ public TextAnalyticsPagedFlux extractKeyPhrasesBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. + * @return A {@link Mono} contains a {@link ExtractKeyPhrasesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux extractKeyPhrasesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono extractKeyPhrasesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { try { - return extractKeyPhrasesBatch( + return extractKeyPhrasesBatchWithResponse( mapByIndex(documents, (index, value) -> { final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value); textDocumentInput.setLanguage(language); return textDocumentInput; - }), options); + }), options).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } @@ -812,15 +593,14 @@ public TextAnalyticsPagedFlux extractKeyPhrasesBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. + * @return A {@link Mono} contains a {@link Response} that contains a {@link ExtractKeyPhrasesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux extractKeyPhrasesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> extractKeyPhrasesBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options) { - return extractKeyPhraseAsyncClient.extractKeyPhrases(documents, options); + return extractKeyPhraseAsyncClient.extractKeyPhrasesWithResponse(documents, options); } // Sentiment @@ -877,70 +657,23 @@ public Mono analyzeSentiment(String document, String language try { Objects.requireNonNull(document, "'document' cannot be null."); return analyzeSentimentBatch(Collections.singletonList(document), language, null) - .map(sentimentResult -> { - if (sentimentResult.isError()) { - throw logger.logExceptionAsError(toTextAnalyticsException(sentimentResult.getError())); + .map(sentimentResultCollection -> { + DocumentSentiment documentSentiment = null; + for (AnalyzeSentimentResult sentimentResult : sentimentResultCollection) { + if (sentimentResult.isError()) { + throw logger.logExceptionAsError(toTextAnalyticsException(sentimentResult.getError())); + } + documentSentiment = sentimentResult.getDocumentSentiment(); } - return sentimentResult.getDocumentSentiment(); - }).last(); + // When the sentiment result collection is empty, + // return empty result for the empty collection returned by the service. + return documentSentiment; + }); } catch (RuntimeException ex) { return monoError(logger, ex); } } - /** - * Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and - * Neutral) for the document and each sentence within it. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
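A sketch of the reshaped key-phrase batch call (documents invented; `getKeyPhrases()` on `ExtractKeyPhraseResult` is assumed from the existing model):

```java
List<String> documents = Arrays.asList(
    "My cat might need to see a veterinarian.",
    "The pitot tube is used to measure airspeed.");

asyncClient.extractKeyPhrasesBatch(documents, "en", null)
    .subscribe(resultCollection ->
        resultCollection.forEach(extractKeyPhraseResult ->
            extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase ->
                System.out.println("Key phrase: " + keyPhrase))));
```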
- * <p>Analyze sentiment in a list of documents. Subscribes to the call asynchronously and prints out the
- * sentiment details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link AnalyzeSentimentResult analyzed text sentiment document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux analyzeSentimentBatch(Iterable documents) { - return analyzeSentimentBatch(documents, defaultLanguage, null); - } - - /** - * Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and - * Neutral) for the document and each sentence within it. - * - *
- * <p>Analyze sentiment in a list of documents with provided language code. Subscribes to the
- * call asynchronously and prints out the sentiment details when a response is received.</p>
- * - * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the document. If not set, uses "en" for - * English as default. - * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link AnalyzeSentimentResult analyzed text sentiment document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux analyzeSentimentBatch( - Iterable documents, String language) { - return analyzeSentimentBatch(documents, language, null); - } - /** * Returns a sentiment prediction, as well as confidence scores for each sentiment label (Positive, Negative, and * Neutral) for the document and each sentence within it. @@ -958,23 +691,22 @@ public TextAnalyticsPagedFlux analyzeSentimentBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link AnalyzeSentimentResult analyzed text sentiment document result}. + * @return A {@link Mono} contains a {@link AnalyzeSentimentResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux analyzeSentimentBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono analyzeSentimentBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { try { - return analyzeSentimentBatch( + return analyzeSentimentBatchWithResponse( mapByIndex(documents, (index, value) -> { final TextDocumentInput textDocumentInput = new TextDocumentInput(index, value); textDocumentInput.setLanguage(language); return textDocumentInput; - }), options); + }), options).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { - return new TextAnalyticsPagedFlux<>(() -> (continuationToken, pageSize) -> fluxError(logger, ex)); + return monoError(logger, ex); } } @@ -993,13 +725,12 @@ public TextAnalyticsPagedFlux analyzeSentimentBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedFlux} contains a list of - * {@link AnalyzeSentimentResult analyzed text sentiment document result}. + * @return A {@link Mono} contains a {@link Response} that contains a {@link AnalyzeSentimentResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. 
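Because document-level failures are reported on each result rather than thrown, callers of the reshaped batch method check `isError()` per document, mirroring the pattern used inside `analyzeSentiment` above. A sketch (sample text invented; `setIncludeStatistics` on `TextAnalyticsRequestOptions` is an assumption):

```java
List<String> documents = Arrays.asList(
    "The hotel was dark and unclean.",
    "The restaurant had amazing gnocchi.");

asyncClient.analyzeSentimentBatch(documents, "en",
    new TextAnalyticsRequestOptions().setIncludeStatistics(true))
    .subscribe(resultCollection ->
        resultCollection.forEach(analyzeSentimentResult -> {
            if (analyzeSentimentResult.isError()) {
                // Per-document failures are surfaced on the result, not thrown.
                System.out.println("Error: " + analyzeSentimentResult.getError().getMessage());
            } else {
                System.out.println("Sentiment: "
                    + analyzeSentimentResult.getDocumentSentiment().getSentiment());
            }
        }));
```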
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedFlux analyzeSentimentBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> analyzeSentimentBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options) { return analyzeSentimentAsyncClient.analyzeSentimentBatch(documents, options); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java index 0856a72f4bd8..71e2427f70c0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/TextAnalyticsClient.java @@ -3,27 +3,27 @@ package com.azure.ai.textanalytics; -import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.CategorizedEntityCollection; import com.azure.ai.textanalytics.models.DetectLanguageInput; -import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; -import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.ai.textanalytics.models.KeyPhrasesCollection; import com.azure.ai.textanalytics.models.LinkedEntity; import com.azure.ai.textanalytics.models.LinkedEntityCollection; -import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; -import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsError; import com.azure.ai.textanalytics.models.TextAnalyticsException; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentInput; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceClient; import com.azure.core.annotation.ServiceMethod; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Objects; @@ -123,60 +123,6 @@ public DetectedLanguage detectLanguage(String document, String countryHint) { return client.detectLanguage(document, countryHint).block(); } - /** - * Detects Language for a batch of documents. - * - * This method will use the default country hint that sets up in - * {@link TextAnalyticsClientBuilder#defaultCountryHint(String)}. If none is specified, service will use 'US' as - * the country hint. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Detects the languages in a list of documents.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable} - * - * @param documents The list of documents to detect languages for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link DetectLanguageResult detected language document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable detectLanguageBatch(Iterable documents) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.detectLanguageBatch(documents)); - } - - /** - * Detects Language for a batch of document with provided country hint. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Detects the language in a list of documents with a provided country hint.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String} - * - * @param documents The list of documents to detect languages for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param countryHint Accepts two letter country codes specified by ISO 3166-1 alpha-2. Defaults to "US" if not - * specified. To remove this behavior you can reset this parameter by setting this value to empty string - * {@code countryHint} = "" or "none". - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link DetectLanguageResult detected language document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable detectLanguageBatch( - Iterable documents, String countryHint) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.detectLanguageBatch(documents, countryHint)); - } - /** * Detects Language for a batch of document with the provided country hint and request options. * @@ -193,17 +139,16 @@ public TextAnalyticsPagedIterable detectLanguageBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link DetectLanguageResult detected language document result}. + * @return A {@link DetectLanguageResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable detectLanguageBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public DetectLanguageResultCollection detectLanguageBatch( Iterable documents, String countryHint, TextAnalyticsRequestOptions options) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.detectLanguageBatch(documents, countryHint, options)); + return client.detectLanguageBatch(documents, countryHint, options).block(); } /** @@ -221,18 +166,16 @@ public TextAnalyticsPagedIterable detectLanguageBatch( * and show statistics. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link DetectLanguageResult detected language document result}. + * @return A {@link Response} that contains a {@link DetectLanguageResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable detectLanguageBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Response detectLanguageBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>( - client.detectLanguageAsyncClient.detectLanguageBatchWithContext(documents, options, context)); + return client.detectLanguageAsyncClient.detectLanguageBatchWithContext(documents, options, context).block(); } // Categorized Entity @@ -291,58 +234,6 @@ public CategorizedEntityCollection recognizeEntities(String document, String lan return client.recognizeEntities(document, language).block(); } - /** - * Returns a list of general categorized entities for the provided list of documents. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
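On the synchronous client the `*WithResponse` variants keep the `Context` parameter. A sketch with invented inputs, assuming a `textAnalyticsClient` built with `buildClient()` and the existing `getPrimaryLanguage()` accessor:

```java
List<DetectLanguageInput> detectLanguageInputs = Arrays.asList(
    new DetectLanguageInput("0", "This is written in English.", "US"),
    new DetectLanguageInput("1", "Este documento está escrito en español.", "ES"));

// Null options fall back to service defaults; Context.NONE when no pipeline context is needed.
Response<DetectLanguageResultCollection> response =
    textAnalyticsClient.detectLanguageBatchWithResponse(detectLanguageInputs, null, Context.NONE);

response.getValue().forEach(result ->
    System.out.println("Primary language: " + result.getPrimaryLanguage().getName()));
```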
- * <p><b>Code Sample</b></p>
- *
- * <p>Recognizes the entities in a list of documents.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable} - * - * @param documents A list of documents to recognize entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return The {@link TextAnalyticsPagedIterable} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeEntitiesBatch(Iterable documents) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeEntitiesBatch(documents)); - } - - /** - * Returns a list of general categorized entities for the provided list of documents with provided language code. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Recognizes the entities in a list of documents with a provided language code.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String} - * - * @param documents A list of documents to recognize entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language. If not set, uses "en" for English as default. - * - * @return The {@link TextAnalyticsPagedIterable} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeEntitiesBatch( - Iterable documents, String language) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeEntitiesBatch(documents, language)); - } - /** * Returns a list of general categorized entities for the provided list of documents with provided language code * and request options. @@ -358,17 +249,16 @@ public TextAnalyticsPagedIterable recognizeEntitiesBatc * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return The {@link TextAnalyticsPagedIterable} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. + * @return A {@link RecognizeEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public RecognizeEntitiesResultCollection recognizeEntitiesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeEntitiesBatch(documents, language, options)); + return client.recognizeEntitiesBatch(documents, language, options).block(); } /** @@ -387,18 +277,16 @@ public TextAnalyticsPagedIterable recognizeEntitiesBatc * and show statistics. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return The {@link TextAnalyticsPagedIterable} contains a list of - * {@link RecognizeEntitiesResult recognized categorized entities document result}. + * @return A {@link Response} that contains a {@link RecognizeEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Response recognizeEntitiesBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>( - client.recognizeEntityAsyncClient.recognizeEntitiesBatchWithContext(documents, options, context)); + return client.recognizeEntityAsyncClient.recognizeEntitiesBatchWithContext(documents, options, context).block(); } // Linked Entities @@ -455,65 +343,6 @@ public LinkedEntityCollection recognizeLinkedEntities(String document, String la return client.recognizeLinkedEntities(document, language).block(); } - /** - * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents. - * See this for supported languages in Text Analytics API. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
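The value-returning sync overload now hands back the collection directly, with no page iteration. A sketch with invented inputs and the same assumed `CategorizedEntity` accessors as above:

```java
RecognizeEntitiesResultCollection resultCollection = textAnalyticsClient.recognizeEntitiesBatch(
    Arrays.asList(
        "Satya Nadella is the CEO of Microsoft.",
        "I had a wonderful trip to Seattle last week."),
    "en", null);

resultCollection.forEach(recognizeEntitiesResult ->
    recognizeEntitiesResult.getEntities().forEach(entity ->
        System.out.printf("Entity: %s, category: %s%n", entity.getText(), entity.getCategory())));
```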
- * <p><b>Code Sample</b></p>
- *
- * <p>Recognizes the linked entities in a list of documents.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable} - * - * @param documents A list of documents to recognize linked entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedIterable} of the - * {@link LinkedEntity recognized linked entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeLinkedEntitiesBatch( - Iterable documents) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeLinkedEntitiesBatch(documents)); - } - - /** - * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents with - * provided language code. - * - * See this for supported languages in Text Analytics API. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Recognizes the linked entities in a list of documents with a provided language code.
- * </p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String} - * - * @param documents A list of documents to recognize linked entities for. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for - * English as default. - * - * @return A {@link TextAnalyticsPagedIterable} of the - * {@link LinkedEntity recognized linked entities document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeLinkedEntitiesBatch( - Iterable documents, String language) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeLinkedEntitiesBatch(documents, language)); - } - /** * Returns a list of recognized entities with links to a well-known knowledge base for the list of documents with * provided language code and request options. @@ -533,17 +362,16 @@ public TextAnalyticsPagedIterable recognizeLinked * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedIterable} of the - * {@link LinkedEntity recognized linked entities document result}. + * @return A {@link RecognizeLinkedEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeLinkedEntitiesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public RecognizeLinkedEntitiesResultCollection recognizeLinkedEntitiesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.recognizeLinkedEntitiesBatch(documents, language, options)); + return client.recognizeLinkedEntitiesBatch(documents, language, options).block(); } /** @@ -564,19 +392,18 @@ public TextAnalyticsPagedIterable recognizeLinked * and show statistics. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link TextAnalyticsPagedIterable} of the - * {@link LinkedEntity recognized linked entities document result}. + * @return A {@link Response} that contains a {@link RecognizeLinkedEntitiesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable recognizeLinkedEntitiesBatch( - Iterable documents, TextAnalyticsRequestOptions options, Context context) { + @ServiceMethod(returns = ReturnType.SINGLE) + public Response + recognizeLinkedEntitiesBatchWithResponse(Iterable documents, + TextAnalyticsRequestOptions options, Context context) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>( - client.recognizeLinkedEntityAsyncClient.recognizeLinkedEntitiesBatchWithContext( - documents, options, context)); + return client.recognizeLinkedEntityAsyncClient.recognizeLinkedEntitiesBatchWithContext( + documents, options, context).block(); } // Key Phrase @@ -595,7 +422,7 @@ public TextAnalyticsPagedIterable recognizeLinked * For text length limits, maximum batch size, and supported text encoding, see * data limits. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of extracted key phrases. + * @return A {@link KeyPhrasesCollection} contains a list of extracted key phrases. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -619,7 +446,7 @@ public KeyPhrasesCollection extractKeyPhrases(String document) { * @param language The 2 letter ISO 639-1 representation of language for the document. If not set, uses "en" for * English as default. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of extracted key phrases. + * @return A {@link KeyPhrasesCollection} contains a list of extracted key phrases. * * @throws NullPointerException if {@code document} is {@code null}. * @throws TextAnalyticsException if the response returned with an {@link TextAnalyticsError error}. @@ -630,61 +457,6 @@ public KeyPhrasesCollection extractKeyPhrases(String document, String language) return client.extractKeyPhrases(document, language).block(); } - /** - * Returns a list of strings denoting the key phrases in the document. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
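A sketch of the sync `*WithResponse` variant together with the batch statistics that now hang off the collection (inputs invented; `setIncludeStatistics` and the `TextDocumentBatchStatistics` getters are assumptions, as neither appears in this hunk):

```java
Response<RecognizeLinkedEntitiesResultCollection> response =
    textAnalyticsClient.recognizeLinkedEntitiesBatchWithResponse(
        Arrays.asList(new TextDocumentInput("0", "Old Faithful is a geyser at Yellowstone Park.")),
        new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);

RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue();
// Batch metadata moved from the paged response onto the collection itself.
TextDocumentBatchStatistics statistics = resultCollection.getStatistics();
System.out.printf("Documents: %d, transactions: %d%n",
    statistics.getDocumentCount(), statistics.getTransactionCount());
```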
- * <p><b>Code Sample</b></p>
- *
- * <p>Extracts key phrases in a list of documents.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable extractKeyPhrasesBatch(Iterable documents) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.extractKeyPhrasesBatch(documents)); - } - - /** - * Returns a list of strings denoting the key phrases in the documents with provided language code. - * - * See this for the list of enabled languages. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Extracts key phrases in a list of documents with a provided language code.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for - * English as default. - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable extractKeyPhrasesBatch( - Iterable documents, String language) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.extractKeyPhrasesBatch(documents, language)); - } - /** * Returns a list of strings denoting the key phrases in the documents with provided language code and * request options. @@ -703,17 +475,16 @@ public TextAnalyticsPagedIterable extractKeyPhrasesBatch * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. + * @return A {@link ExtractKeyPhrasesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable extractKeyPhrasesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public ExtractKeyPhrasesResultCollection extractKeyPhrasesBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.extractKeyPhrasesBatch(documents, language, options)); + return client.extractKeyPhrasesBatch(documents, language, options).block(); } /** @@ -733,18 +504,17 @@ public TextAnalyticsPagedIterable extractKeyPhrasesBatch * and show statistics. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link ExtractKeyPhraseResult extracted key phrases document result}. + * @return A {@link Response} that contains a {@link ExtractKeyPhrasesResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable extractKeyPhrasesBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Response extractKeyPhrasesBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>( - client.extractKeyPhraseAsyncClient.extractKeyPhrasesBatchWithContext(documents, options, context)); + return client.extractKeyPhraseAsyncClient.extractKeyPhrasesBatchWithContext(documents, options, context) + .block(); } // Sentiment @@ -799,61 +569,6 @@ public DocumentSentiment analyzeSentiment(String document, String language) { return client.analyzeSentiment(document, language).block(); } - /** - * Returns a sentiment prediction, as well as confidence scores for each sentiment label - * (Positive, Negative, and Neutral) for the document and each sentence within it. - * - * This method will use the default language that sets up in - * {@link TextAnalyticsClientBuilder#defaultLanguage(String)}. If none is specified, service will use 'en' as - * the language. - * - *
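A sketch of the sync key-phrase batch call with the per-document error check (inputs invented; `isError()` and `getKeyPhrases()` on `ExtractKeyPhraseResult` are assumed from the existing model):

```java
ExtractKeyPhrasesResultCollection resultCollection = textAnalyticsClient.extractKeyPhrasesBatch(
    Arrays.asList(
        "My cat might need to see a veterinarian.",
        "The pitot tube is used to measure airspeed."),
    "en", null);

resultCollection.forEach(extractKeyPhraseResult -> {
    if (!extractKeyPhraseResult.isError()) {
        extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase ->
            System.out.println("Key phrase: " + keyPhrase));
    }
});
```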
- * <p><b>Code Sample</b></p>
- *
- * <p>Analyze the sentiments in a list of documents.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link AnalyzeSentimentResult analyzed sentiment document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable analyzeSentimentBatch(Iterable documents) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.analyzeSentimentBatch(documents)); - } - - /** - * Returns a sentiment prediction, as well as confidence scores for each sentiment label - * (Positive, Negative, and Neutral) for the document and each sentence within it. - * - *
- * <p><b>Code Sample</b></p>
- *
- * <p>Analyze the sentiments in a list of documents with a provided language code.</p>
- * {@codesnippet com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String} - * - * @param documents A list of documents to be analyzed. - * For text length limits, maximum batch size, and supported text encoding, see - * data limits. - * @param language The 2 letter ISO 639-1 representation of language for the documents. If not set, uses "en" for - * English as default.. - * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link AnalyzeSentimentResult analyzed sentiment document result}. - * - * @throws NullPointerException if {@code documents} is {@code null}. - * @throws IllegalArgumentException if {@code documents} is empty. - */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable analyzeSentimentBatch( - Iterable documents, String language) { - inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.analyzeSentimentBatch(documents, language)); - } - /** * Returns a sentiment prediction, as well as confidence scores for each sentiment label * (Positive, Negative, and Neutral) for the document and each sentence within it. @@ -870,17 +585,16 @@ public TextAnalyticsPagedIterable analyzeSentimentBatch( * @param options The {@link TextAnalyticsRequestOptions options} to configure the scoring model for documents * and show statistics. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link AnalyzeSentimentResult analyzed sentiment document result}. + * @return A {@link AnalyzeSentimentResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. */ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable analyzeSentimentBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public AnalyzeSentimentResultCollection analyzeSentimentBatch( Iterable documents, String language, TextAnalyticsRequestOptions options) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>(client.analyzeSentimentBatch(documents, language, options)); + return client.analyzeSentimentBatch(documents, language, options).block(); } /** @@ -899,17 +613,15 @@ public TextAnalyticsPagedIterable analyzeSentimentBatch( * and show statistics. * @param context Additional context that is passed through the Http pipeline during the service call. * - * @return A {@link TextAnalyticsPagedIterable} contains a list of - * {@link AnalyzeSentimentResult analyzed sentiment document result}. + * @return A {@link Response} that contains a {@link AnalyzeSentimentResultCollection}. * * @throws NullPointerException if {@code documents} is {@code null}. * @throws IllegalArgumentException if {@code documents} is empty. 
*/ - @ServiceMethod(returns = ReturnType.COLLECTION) - public TextAnalyticsPagedIterable analyzeSentimentBatch( + @ServiceMethod(returns = ReturnType.SINGLE) + public Response analyzeSentimentBatchWithResponse( Iterable documents, TextAnalyticsRequestOptions options, Context context) { inputDocumentsValidation(documents); - return new TextAnalyticsPagedIterable<>( - client.analyzeSentimentAsyncClient.analyzeSentimentBatchWithContext(documents, options, context)); + return client.analyzeSentimentAsyncClient.analyzeSentimentBatchWithContext(documents, options, context).block(); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/implementation/Utility.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/implementation/Utility.java index 03e7ccf8b485..9555c37106f6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/implementation/Utility.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/implementation/Utility.java @@ -13,6 +13,7 @@ import com.azure.ai.textanalytics.implementation.models.TextAnalyticsError; import com.azure.ai.textanalytics.implementation.models.TextAnalyticsErrorException; import com.azure.ai.textanalytics.models.DetectLanguageInput; +import com.azure.ai.textanalytics.models.TextAnalyticsErrorCode; import com.azure.ai.textanalytics.models.TextAnalyticsException; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; @@ -173,14 +174,14 @@ public static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyti if (innerError == null) { final ErrorCodeValue errorCodeValue = textAnalyticsError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( - errorCodeValue == null ? null : errorCodeValue.toString(), + TextAnalyticsErrorCode.fromString(errorCodeValue == null ? null : errorCodeValue.toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget()); } final InnerErrorCodeValue innerErrorCodeValue = innerError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( - innerErrorCodeValue == null ? null : innerErrorCodeValue.toString(), + TextAnalyticsErrorCode.fromString(innerErrorCodeValue == null ? null : innerErrorCodeValue.toString()), innerError.getMessage(), innerError.getTarget()); } @@ -209,7 +210,7 @@ public static List toMultiLanguageInput(Iterable sentences, IterableStream warnings) { - this.sentiment = TextSentiment.fromString(sentiment); + this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.sentences = sentences; this.warnings = warnings; diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/EntityCategory.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/EntityCategory.java index 4e17276052d5..5d412ae6b1af 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/EntityCategory.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/EntityCategory.java @@ -91,7 +91,7 @@ public final class EntityCategory extends ExpandableStringEnum { * @return The corresponding {@link EntityCategory}. 
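A sketch of the sync sentiment `*WithResponse` call, drilling into sentences (inputs invented; `getSentences()` is assumed from the `DocumentSentiment` constructor shown above):

```java
Response<AnalyzeSentimentResultCollection> response =
    textAnalyticsClient.analyzeSentimentBatchWithResponse(
        Arrays.asList(
            new TextDocumentInput("0", "The hotel was dark and unclean."),
            new TextDocumentInput("1", "The restaurant had amazing gnocchi.")),
        new TextAnalyticsRequestOptions(), Context.NONE);

response.getValue().forEach(analyzeSentimentResult -> {
    DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment();
    System.out.println("Document sentiment: " + documentSentiment.getSentiment());
    documentSentiment.getSentences().forEach(sentenceSentiment ->
        System.out.println("  Sentence sentiment: " + sentenceSentiment.getSentiment()));
});
```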
*/ @JsonCreator - static EntityCategory fromString(String name) { + public static EntityCategory fromString(String name) { return fromString(name, EntityCategory.class); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/SentenceSentiment.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/SentenceSentiment.java index b14092e5c585..636739209fe9 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/SentenceSentiment.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/SentenceSentiment.java @@ -23,9 +23,9 @@ public final class SentenceSentiment { * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ - public SentenceSentiment(String text, String sentiment, SentimentConfidenceScores confidenceScores) { + public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; - this.sentiment = TextSentiment.fromString(sentiment); + this.sentiment = sentiment; this.confidenceScores = confidenceScores; } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsError.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsError.java index 1e41e50e4edc..7857d22165bf 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsError.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsError.java @@ -28,13 +28,13 @@ public final class TextAnalyticsError { private final String target; /** - * Creates a {@code TextAnalyticsError} model that describes text analytics error. + * Creates a {@link TextAnalyticsError} model that describes text analytics error. * @param errorCode The error code. * @param message The error message. * @param target The error target. */ - public TextAnalyticsError(String errorCode, String message, String target) { - this.errorCode = TextAnalyticsErrorCode.fromString(errorCode); + public TextAnalyticsError(TextAnalyticsErrorCode errorCode, String message, String target) { + this.errorCode = errorCode; this.message = message; this.target = target; } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsErrorCode.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsErrorCode.java index 12d47c384163..3fcc2276b383 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsErrorCode.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/TextAnalyticsErrorCode.java @@ -7,11 +7,15 @@ import com.azure.core.util.ExpandableStringEnum; import com.fasterxml.jackson.annotation.JsonCreator; +import java.io.Serializable; + /** * Defines values for TextAnalyticsErrorCode. */ @Immutable -public final class TextAnalyticsErrorCode extends ExpandableStringEnum { +public final class TextAnalyticsErrorCode extends ExpandableStringEnum implements Serializable { + private static final long serialVersionUID = 21436310107606058L; + /** * Enum value invalidRequest. 
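The constructor changes above mean code that builds these models, such as tests and mocks, now passes the expandable enums directly, using the now-public `fromString` factories. A sketch (the `SentimentConfidenceScores` constructor and its parameter order are assumptions, as they do not appear in this diff):

```java
// TextAnalyticsError now takes TextAnalyticsErrorCode instead of a raw string.
TextAnalyticsError error = new TextAnalyticsError(
    TextAnalyticsErrorCode.fromString("invalidRequest"), "The request is invalid.", null);

// SentenceSentiment likewise takes TextSentiment directly.
SentenceSentiment sentenceSentiment = new SentenceSentiment(
    "The hotel was dark and unclean.",
    TextSentiment.fromString("negative"),
    // Assumed parameter order: negative, neutral, positive.
    new SentimentConfidenceScores(0.99, 0.0, 0.01));
```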
*/ @@ -84,7 +88,7 @@ public final class TextAnalyticsErrorCode extends ExpandableStringEnum { * @return The corresponding {@link TextSentiment}. */ @JsonCreator - static TextSentiment fromString(String name) { + public static TextSentiment fromString(String name) { return fromString(name, TextSentiment.class); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/WarningCode.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/WarningCode.java index 1bc7b5ea56d7..ce3e3e17d26e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/WarningCode.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/WarningCode.java @@ -30,7 +30,7 @@ public final class WarningCode extends ExpandableStringEnum { * @return The corresponding {@link WarningCode}. */ @JsonCreator - static WarningCode fromString(String name) { + public static WarningCode fromString(String name) { return fromString(name, WarningCode.class); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/AnalyzeSentimentResultCollection.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/AnalyzeSentimentResultCollection.java new file mode 100644 index 000000000000..f9d4b9f1497a --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/AnalyzeSentimentResultCollection.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.ai.textanalytics.util; + +import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.core.util.IterableStream; + +/** + * A collection model that contains a list of {@link AnalyzeSentimentResult} along with model version and + * batch's statistics. + */ +public class AnalyzeSentimentResultCollection extends IterableStream { + private final String modelVersion; + private final TextDocumentBatchStatistics statistics; + /** + * Create a {@link AnalyzeSentimentResultCollection} model that maintains a list of {@link AnalyzeSentimentResult} + * along with model version and batch's statistics. + * + * @param documentResults A list of {@link AnalyzeSentimentResult}. + * @param modelVersion The model version trained in service for the request. + * @param statistics The batch statistics of response. + */ + public AnalyzeSentimentResultCollection(Iterable documentResults, String modelVersion, + TextDocumentBatchStatistics statistics) { + super(documentResults); + this.modelVersion = modelVersion; + this.statistics = statistics; + } + + /** + * Get the model version trained in service for the request. + * + * @return The model version trained in service for the request. + */ + public String getModelVersion() { + return modelVersion; + } + + /** + * Get the batch statistics of response. + * + * @return The batch statistics of response. 
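A minimal sketch (not part of this patch) of how this collection type behaves once constructed; the empty result list and the "latest" model version are placeholders, and the `null` statistics stand in for a request made without `setIncludeStatistics(true)`.

```java
import com.azure.ai.textanalytics.models.AnalyzeSentimentResult;
import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection;

import java.util.Collections;

public final class ResultCollectionSketch {
    public static void main(String[] args) {
        // Hand-built instance purely for illustration; real instances come from the client.
        AnalyzeSentimentResultCollection collection = new AnalyzeSentimentResultCollection(
            Collections.<AnalyzeSentimentResult>emptyList(), "latest", null);
        System.out.printf("Model version: %s%n", collection.getModelVersion());
        // Extending IterableStream means callers iterate directly, with no paging layer.
        collection.forEach(result -> System.out.printf("Document ID: %s%n", result.getId()));
    }
}
```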
+ */ + public TextDocumentBatchStatistics getStatistics() { + return statistics; + } +} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/DetectLanguageResultCollection.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/DetectLanguageResultCollection.java new file mode 100644 index 000000000000..467e201f9ed1 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/DetectLanguageResultCollection.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.ai.textanalytics.util; + +import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.core.util.IterableStream; + +/** + * A collection model that contains a list of {@link DetectLanguageResult} along with model version and + * batch's statistics. + */ +public class DetectLanguageResultCollection extends IterableStream { + private final String modelVersion; + private final TextDocumentBatchStatistics statistics; + /** + * Create a {@link DetectLanguageResultCollection} model that maintains a list of {@link DetectLanguageResult} + * along with model version and batch's statistics. + * + * @param documentResults A list of {@link DetectLanguageResult}. + * @param modelVersion The model version trained in service for the request. + * @param statistics The batch statistics of response. + */ + public DetectLanguageResultCollection(Iterable documentResults, String modelVersion, + TextDocumentBatchStatistics statistics) { + super(documentResults); + this.modelVersion = modelVersion; + this.statistics = statistics; + } + + /** + * Get the model version trained in service for the request. + * + * @return The model version trained in service for the request. + */ + public String getModelVersion() { + return modelVersion; + } + + /** + * Get the batch statistics of response. + * + * @return The batch statistics of response. + */ + public TextDocumentBatchStatistics getStatistics() { + return statistics; + } +} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/ExtractKeyPhrasesResultCollection.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/ExtractKeyPhrasesResultCollection.java new file mode 100644 index 000000000000..69487ca88948 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/ExtractKeyPhrasesResultCollection.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.ai.textanalytics.util; + +import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.core.util.IterableStream; + +/** + * A collection model that contains a list of {@link ExtractKeyPhraseResult} along with model version and + * batch's statistics. + */ +public class ExtractKeyPhrasesResultCollection extends IterableStream { + private final String modelVersion; + private final TextDocumentBatchStatistics statistics; + /** + * Create a {@link ExtractKeyPhrasesResultCollection} model that maintains a list of {@link ExtractKeyPhraseResult} + * along with model version and batch's statistics. 
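For orientation, a sketch of consuming the `DetectLanguageResultCollection` defined above through the synchronous client; it assumes an already-built `TextAnalyticsClient` and mirrors the pattern this patch introduces.

```java
import com.azure.ai.textanalytics.TextAnalyticsClient;
import com.azure.ai.textanalytics.util.DetectLanguageResultCollection;

import java.util.Arrays;

final class DetectLanguageSketch {
    static void run(TextAnalyticsClient client) {
        DetectLanguageResultCollection results = client.detectLanguageBatch(
            Arrays.asList("This is written in English",
                "Este es un documento escrito en Español."),
            "US", null);
        System.out.printf("Model version: %s%n", results.getModelVersion());
        // Iterate the collection directly; each element is a DetectLanguageResult.
        results.forEach(result -> System.out.printf("%s -> %s%n",
            result.getId(), result.getPrimaryLanguage().getName()));
    }
}
```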
+ * + * @param documentResults A list of {@link ExtractKeyPhraseResult}. + * @param modelVersion The model version trained in service for the request. + * @param statistics The batch statistics of response. + */ + public ExtractKeyPhrasesResultCollection(Iterable documentResults, String modelVersion, + TextDocumentBatchStatistics statistics) { + super(documentResults); + this.modelVersion = modelVersion; + this.statistics = statistics; + } + + /** + * Get the model version trained in service for the request. + * + * @return The model version trained in service for the request. + */ + public String getModelVersion() { + return modelVersion; + } + + /** + * Get the batch statistics of response. + * + * @return The batch statistics of response. + */ + public TextDocumentBatchStatistics getStatistics() { + return statistics; + } +} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeEntitiesResultCollection.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeEntitiesResultCollection.java new file mode 100644 index 000000000000..0393446f04f6 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeEntitiesResultCollection.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.ai.textanalytics.util; + +import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.core.util.IterableStream; + +/** + * A collection model that contains a list of {@link RecognizeEntitiesResult} along with model version and + * batch's statistics. + */ +public class RecognizeEntitiesResultCollection extends IterableStream { + private final String modelVersion; + private final TextDocumentBatchStatistics statistics; + /** + * Create a {@link RecognizeEntitiesResultCollection} model that maintains a list of {@link RecognizeEntitiesResult} + * along with model version and batch's statistics. + * + * @param documentResults A list of {@link RecognizeEntitiesResult}. + * @param modelVersion The model version trained in service for the request. + * @param statistics The batch statistics of response. + */ + public RecognizeEntitiesResultCollection(Iterable documentResults, String modelVersion, + TextDocumentBatchStatistics statistics) { + super(documentResults); + this.modelVersion = modelVersion; + this.statistics = statistics; + } + + /** + * Get the model version trained in service for the request. + * + * @return The model version trained in service for the request. + */ + public String getModelVersion() { + return modelVersion; + } + + /** + * Get the batch statistics of response. + * + * @return The batch statistics of response. + */ + public TextDocumentBatchStatistics getStatistics() { + return statistics; + } +} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeLinkedEntitiesResultCollection.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeLinkedEntitiesResultCollection.java new file mode 100644 index 000000000000..e3bf8d3f8e50 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/RecognizeLinkedEntitiesResultCollection.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package com.azure.ai.textanalytics.util; + +import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.core.util.IterableStream; + +/** + * A collection model that contains a list of {@link RecognizeLinkedEntitiesResult} along with model version and + * batch's statistics. + */ +public class RecognizeLinkedEntitiesResultCollection extends IterableStream { + private final String modelVersion; + private final TextDocumentBatchStatistics statistics; + /** + * Create a {@link RecognizeLinkedEntitiesResultCollection} model that maintains a list of + * {@link RecognizeLinkedEntitiesResult} along with model version and batch's statistics. + * + * @param documentResults A list of {@link RecognizeLinkedEntitiesResult}. + * @param modelVersion The model version trained in service for the request. + * @param statistics The batch statistics of response. + */ + public RecognizeLinkedEntitiesResultCollection(Iterable documentResults, + String modelVersion, TextDocumentBatchStatistics statistics) { + super(documentResults); + this.modelVersion = modelVersion; + this.statistics = statistics; + } + + /** + * Get the model version trained in service for the request. + * + * @return The model version trained in service for the request. + */ + public String getModelVersion() { + return modelVersion; + } + + /** + * Get the batch statistics of response. + * + * @return The batch statistics of response. + */ + public TextDocumentBatchStatistics getStatistics() { + return statistics; + } +} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedFlux.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedFlux.java deleted file mode 100644 index 866e389e321d..000000000000 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedFlux.java +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.ai.textanalytics.util; - -import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; -import com.azure.core.annotation.Immutable; -import com.azure.core.http.rest.PagedResponseBase; -import com.azure.core.util.paging.ContinuablePagedFluxCore; -import com.azure.core.util.paging.PageRetriever; - -import java.util.function.Supplier; - -/** - * An implementation of {@link ContinuablePagedFluxCore} that uses {@link TextAnalyticsPagedResponse} which extends - * default {@link PagedResponseBase} along with {@code modelVersion} and {@link TextDocumentBatchStatistics}. - * - *
<p>Code sample using {@link TextAnalyticsPagedFlux}</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribe} - * - *
<p>Code sample using {@link TextAnalyticsPagedFlux} by page</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribeByPage} - * - * @param The type of items contained in the {@link TextAnalyticsPagedFlux} - * - * @see ContinuablePagedFluxCore - */ -@Immutable -public final class TextAnalyticsPagedFlux - extends ContinuablePagedFluxCore> { - /** - * Create an instance of {@link TextAnalyticsPagedFlux} - * - * @param pageRetrieverProvider a provider that returns {@link PageRetriever} - */ - public TextAnalyticsPagedFlux( - Supplier>> pageRetrieverProvider) { - super(pageRetrieverProvider); - } -} - - diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedIterable.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedIterable.java deleted file mode 100644 index fe6102a83cac..000000000000 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedIterable.java +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.ai.textanalytics.util; - -import com.azure.core.annotation.Immutable; -import com.azure.core.http.rest.PagedResponse; -import com.azure.core.util.IterableStream; - -import java.util.stream.Stream; - -/** - * This class provides utility to iterate over {@link PagedResponse} using {@link Stream} and {@link Iterable} - * interfaces. - * - *
<p>Code sample using {@link Stream}</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.stream} - * - *
<p>Code sample using {@link Stream} by page</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.streamByPage} - * - *
<p>Code sample using {@link Iterable}</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterator} - * - *
<p>Code sample using {@link Iterable} by page</p>
- * {@codesnippet com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterableByPage} - * - * @param The type of items contained in the {@link TextAnalyticsPagedIterable} - * - * @see IterableStream - */ -@Immutable -public final class TextAnalyticsPagedIterable extends IterableStream { - /* - * This is the default batch size that will be requested when using stream or iterable by page, this will indicate - * to Reactor how many elements should be prefetch before another batch is requested. - */ - private static final int DEFAULT_BATCH_SIZE = 1; - - private final TextAnalyticsPagedFlux textAnalyticsPagedFlux; - - /** - * Creates instance given {@link TextAnalyticsPagedFlux}. - * - * @param textAnalyticsPagedFlux It used as iterable. - */ - public TextAnalyticsPagedIterable(TextAnalyticsPagedFlux textAnalyticsPagedFlux) { - super(textAnalyticsPagedFlux); - this.textAnalyticsPagedFlux = textAnalyticsPagedFlux; - } - - /** - * Retrieve the {@link Stream}, one page at a time, starting from the next page associated with the given - * continuation token. To start from first page, use {@link #streamByPage()} instead. - * - * @return {@link Stream} of a {@link TextAnalyticsPagedResponse} of {@code T}. - */ - public Stream> streamByPage() { - return textAnalyticsPagedFlux.byPage().toStream(DEFAULT_BATCH_SIZE); - } - - /** - * Provides {@link Iterable} API for {@link TextAnalyticsPagedResponse}. - * - * @return {@link Iterable} of {@link TextAnalyticsPagedResponse} of {@code T}. - */ - public Iterable> iterableByPage() { - return textAnalyticsPagedFlux.byPage().toIterable(DEFAULT_BATCH_SIZE); - } -} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedResponse.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedResponse.java deleted file mode 100644 index 83d9260e9fb8..000000000000 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/TextAnalyticsPagedResponse.java +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.ai.textanalytics.util; - -import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; -import com.azure.core.annotation.Immutable; -import com.azure.core.http.HttpHeaders; -import com.azure.core.http.HttpRequest; -import com.azure.core.http.rest.PagedResponse; -import com.azure.core.http.rest.PagedResponseBase; - -import java.util.List; - -/** - * This type extends {@link PagedResponse} along with the model version that trained in service and the - * {@link TextDocumentBatchStatistics batch statistics of response}. - * - * @param The type of items contained in the {@link TextAnalyticsPagedResponse} - * - * @see PagedResponseBase - */ -@Immutable -public final class TextAnalyticsPagedResponse extends PagedResponseBase { - private final String modelVersion; - private final TextDocumentBatchStatistics statistics; - - /** - * Create a new instance of the {@link TextAnalyticsPagedResponse}. - * - * @param request The HttpRequest that was sent to the service whose response resulted in this response. - * @param statusCode The status code from the response. - * @param headers The headers from the response. - * @param items The items returned from the service within the response. 
- * @param continuationToken The continuation token returned from the service, to enable future requests to pick up - * from the same place in the paged iteration. - * @param modelVersion The model version trained in service for the request. - * @param statistics The batch statistics of response. - */ - public TextAnalyticsPagedResponse(HttpRequest request, int statusCode, HttpHeaders headers, List items, - String continuationToken, String modelVersion, TextDocumentBatchStatistics statistics) { - super(request, statusCode, headers, items, continuationToken, null); - this.modelVersion = modelVersion; - this.statistics = statistics; - } - - /** - * Get the model version trained in service for the request. - * - * @return The model version trained in service for the request. - */ - public String getModelVersion() { - return modelVersion; - } - - /** - * Get the batch statistics of response. - * - * @return The batch statistics of response. - */ - public TextDocumentBatchStatistics getStatistics() { - return statistics; - } -} diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/package-info.java b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/package-info.java index f257ab3315dd..4e612dc24040 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/package-info.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/util/package-info.java @@ -2,8 +2,6 @@ // Licensed under the MIT License. /** - * Package containing classes for creating a {@link com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux} - * and {@link com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable} to use them to perform operations on - * Azure Text Analytics. + * Package containing Azure Text Analytics collection types that contain the results for batch operations. 
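Before the sample diffs below, a self-contained sketch of the `*WithResponse` pattern these collection types enable; `{key}` and `{endpoint}` are placeholders, not values from this repository.

```java
import com.azure.ai.textanalytics.TextAnalyticsClient;
import com.azure.ai.textanalytics.TextAnalyticsClientBuilder;
import com.azure.ai.textanalytics.models.DetectLanguageInput;
import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions;
import com.azure.ai.textanalytics.util.DetectLanguageResultCollection;
import com.azure.core.credential.AzureKeyCredential;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;

import java.util.Arrays;

public final class WithResponseSketch {
    public static void main(String[] args) {
        TextAnalyticsClient client = new TextAnalyticsClientBuilder()
            .credential(new AzureKeyCredential("{key}"))
            .endpoint("{endpoint}")
            .buildClient();

        // The max-overload returns the HTTP response wrapping the result collection.
        Response<DetectLanguageResultCollection> response =
            client.detectLanguageBatchWithResponse(
                Arrays.asList(new DetectLanguageInput("1", "This is written in English", "US")),
                new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);

        System.out.printf("Status code: %d%n", response.getStatusCode());
        response.getValue().forEach(result -> System.out.printf("%s -> %s%n",
            result.getId(), result.getPrimaryLanguage().getName()));
    }
}
```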
*/ package com.azure.ai.textanalytics.util; diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java index de08ebd1aea1..70b6b6f6828f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/ReadmeSamples.java @@ -89,7 +89,7 @@ public void handlingException() { ); try { - textAnalyticsClient.detectLanguageBatch(documents, null, Context.NONE); + textAnalyticsClient.detectLanguageBatchWithResponse(documents, null, Context.NONE); } catch (HttpResponseException e) { System.out.println(e.getMessage()); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientJavaDocCodeSnippets.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientJavaDocCodeSnippets.java index 0aa63768ba5e..fcb6645a8b32 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientJavaDocCodeSnippets.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientJavaDocCodeSnippets.java @@ -3,21 +3,23 @@ package com.azure.ai.textanalytics; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.DetectLanguageInput; import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; -import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; -import java.util.Collections; import java.util.List; /** @@ -84,55 +86,6 @@ public void detectLanguageWithCountryHint() { // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguage#string-string } - /** - * Code snippet for {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable)} - */ - public void detectLanguageStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable - final List documents = Arrays.asList( - "This is written in English", "Este es un documento escrito en Español."); - textAnalyticsAsyncClient.detectLanguageBatch(documents).byPage().subscribe(batchResult -> { - // Batch statistics - final TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), 
batchStatistics.getValidDocumentCount()); - // Batch result of languages - for (DetectLanguageResult detectLanguageResult : batchResult.getElements()) { - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - } - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String)} - */ - public void detectLanguageStringListWithCountryHint() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String - List documents = Arrays.asList( - "This is written in English", - "Este es un documento escrito en Español." - ); - textAnalyticsAsyncClient.detectLanguageBatch(documents, "US").byPage().subscribe( - batchResult -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch result of languages - for (DetectLanguageResult detectLanguageResult : batchResult.getElements()) { - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - } - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -142,14 +95,14 @@ public void detectLanguageStringListWithOptions() { "This is written in English", "Este es un documento escrito en Español." 
); - textAnalyticsAsyncClient.detectLanguageBatch(documents, "US", null).byPage().subscribe( + textAnalyticsAsyncClient.detectLanguageBatch(documents, "US", null).subscribe( batchResult -> { // Batch statistics TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Batch result of languages - for (DetectLanguageResult detectLanguageResult : batchResult.getElements()) { + for (DetectLanguageResult detectLanguageResult : batchResult) { DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), @@ -160,7 +113,7 @@ public void detectLanguageStringListWithOptions() { } /** - * Code snippet for {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions)} + * Code snippet for {@link TextAnalyticsAsyncClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ public void detectBatchLanguagesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions @@ -172,14 +125,18 @@ public void detectBatchLanguagesMaxOverload() { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true); - textAnalyticsAsyncClient.detectLanguageBatch(detectLanguageInputs1, requestOptions).byPage() + textAnalyticsAsyncClient.detectLanguageBatchWithResponse(detectLanguageInputs1, requestOptions) .subscribe(response -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + + DetectLanguageResultCollection resultCollection = response.getValue(); // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Batch result of languages - for (DetectLanguageResult detectLanguageResult : response.getElements()) { + for (DetectLanguageResult detectLanguageResult : resultCollection) { DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected language name: %s, ISO 6391 Name: %s, confidence score: %f.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), @@ -221,53 +178,6 @@ public void recognizeEntitiesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeEntities#string-string } - /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable)} - */ - public void recognizeEntitiesStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable - List documents = Arrays.asList( - "I had a wonderful trip to Seattle last week.", - "I work at Microsoft." 
- ); - - textAnalyticsAsyncClient.recognizeEntitiesBatch(documents).byPage().subscribe(batchResult -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch result of categorized entities - batchResult.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf( - "Recognized entity: %s, entity category: %s, entity subcategory: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()))); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String)} - */ - public void recognizeEntitiesStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String - List documents = Arrays.asList( - "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); - - textAnalyticsAsyncClient.recognizeEntitiesBatch(documents, "en").byPage() - .subscribe(batchResult -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch Result of entities - batchResult.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf( - "Recognized categorized entity: %s, category: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -276,14 +186,14 @@ public void recognizeEntitiesStringListWithOptions() { List documents = Arrays.asList( "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); - textAnalyticsAsyncClient.recognizeEntitiesBatch(documents, "en", null).byPage() + textAnalyticsAsyncClient.recognizeEntitiesBatch(documents, "en", null) .subscribe(batchResult -> { // Batch statistics TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Batch Result of entities - batchResult.getElements().forEach(recognizeEntitiesResult -> + batchResult.forEach(recognizeEntitiesResult -> recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf( "Recognized categorized entity: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); @@ -292,8 +202,7 @@ public void recognizeEntitiesStringListWithOptions() { } /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, - * TextAnalyticsRequestOptions)} + * Code snippet for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ public 
void recognizeBatchEntitiesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeCategorizedEntitiesBatch#Iterable-TextAnalyticsRequestOptions @@ -304,14 +213,18 @@ public void recognizeBatchEntitiesMaxOverload() { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true); - textAnalyticsAsyncClient.recognizeEntitiesBatch(textDocumentInputs1, requestOptions).byPage() + textAnalyticsAsyncClient.recognizeEntitiesBatchWithResponse(textDocumentInputs1, requestOptions) .subscribe(response -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + RecognizeEntitiesResultCollection resultCollection = response.getValue(); + // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - response.getElements().forEach(recognizeEntitiesResult -> + resultCollection.forEach(recognizeEntitiesResult -> recognizeEntitiesResult.getEntities().forEach(entity -> System.out.printf( "Recognized categorized entity: %s, category: %s, confidence score: %f.%n", entity.getText(), @@ -363,70 +276,6 @@ public void recognizeLinkedEntitiesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntities#string-string } - /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable)} - */ - public void recognizeLinkedEntitiesStringList() { - - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable - List documents = Arrays.asList( - "Old Faithful is a geyser at Yellowstone Park.", - "Mount Shasta has lenticular clouds." - ); - - textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents).byPage().subscribe(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable - - } - - /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String)} - */ - public void recognizeLinkedEntitiesStringListWithLanguageCode() { - - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String - List documents = Arrays.asList( - "Old Faithful is a geyser at Yellowstone Park.", - "Mount Shasta has lenticular clouds." 
- ); - - textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents, "en").byPage() - .subscribe(batchResult -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - batchResult.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -438,14 +287,14 @@ public void recognizeLinkedEntitiesStringListWithOptions() { "Mount Shasta has lenticular clouds." ); - textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents, "en", null).byPage() + textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(documents, "en", null) .subscribe(batchResult -> { // Batch statistics TextDocumentBatchStatistics batchStatistics = batchResult.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - batchResult.getElements().forEach(recognizeLinkedEntitiesResult -> + batchResult.forEach(recognizeLinkedEntitiesResult -> recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { System.out.println("Linked Entities:"); System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", @@ -460,8 +309,7 @@ public void recognizeLinkedEntitiesStringListWithOptions() { } /** - * Code snippet for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, - * TextAnalyticsRequestOptions)} + * Code snippet for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ public void recognizeBatchLinkedEntitiesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions @@ -472,14 +320,18 @@ public void recognizeBatchLinkedEntitiesMaxOverload() { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true); - textAnalyticsAsyncClient.recognizeLinkedEntitiesBatch(textDocumentInputs1, requestOptions).byPage() + textAnalyticsAsyncClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs1, requestOptions) .subscribe(response -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue(); + // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); System.out.printf("Batch 
statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - response.getElements().forEach(recognizeLinkedEntitiesResult -> + resultCollection.forEach(recognizeLinkedEntitiesResult -> recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { System.out.println("Linked Entities:"); System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", @@ -517,53 +369,6 @@ public void extractKeyPhrasesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrases#string-string } - /** - * Code snippet for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable)} - */ - public void extractKeyPhrasesStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable - List documents = Arrays.asList( - "Hello world. This is some input text that I love.", - "Bonjour tout le monde"); - - textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents).byPage().subscribe(extractKeyPhraseResults -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = extractKeyPhraseResults.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - extractKeyPhraseResults.getElements().forEach(extractKeyPhraseResult -> { - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String)} - */ - public void extractKeyPhrasesStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String - List documents = Arrays.asList( - "Hello world. This is some input text that I love.", - "Bonjour tout le monde"); - - textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents, "en").byPage().subscribe( - extractKeyPhraseResults -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = extractKeyPhraseResults.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - extractKeyPhraseResults.getElements().forEach(extractKeyPhraseResult -> { - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -573,14 +378,14 @@ public void extractKeyPhrasesStringListWithOptions() { "Hello world. 
This is some input text that I love.", "Bonjour tout le monde"); - textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents, "en", null).byPage().subscribe( + textAnalyticsAsyncClient.extractKeyPhrasesBatch(documents, "en", null).subscribe( extractKeyPhraseResults -> { // Batch statistics TextDocumentBatchStatistics batchStatistics = extractKeyPhraseResults.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - extractKeyPhraseResults.getElements().forEach(extractKeyPhraseResult -> { + extractKeyPhraseResults.forEach(extractKeyPhraseResult -> { System.out.println("Extracted phrases:"); extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); }); @@ -589,8 +394,7 @@ public void extractKeyPhrasesStringListWithOptions() { } /** - * Code snippet for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, - * TextAnalyticsRequestOptions)} + * Code snippet for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ public void extractBatchKeyPhrasesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions @@ -601,14 +405,18 @@ public void extractBatchKeyPhrasesMaxOverload() { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true); - textAnalyticsAsyncClient.extractKeyPhrasesBatch(textDocumentInputs1, requestOptions).byPage() + textAnalyticsAsyncClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs1, requestOptions) .subscribe(response -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + ExtractKeyPhrasesResultCollection resultCollection = response.getValue(); + // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - for (ExtractKeyPhraseResult extractKeyPhraseResult : response.getElements()) { + for (ExtractKeyPhraseResult extractKeyPhraseResult : resultCollection) { System.out.println("Extracted phrases:"); for (String keyPhrase : extractKeyPhraseResult.getKeyPhrases()) { System.out.printf("%s.%n", keyPhrase); @@ -631,7 +439,8 @@ public void analyzeSentiment() { for (SentenceSentiment sentenceSentiment : documentSentiment.getSentences()) { System.out.printf( - "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, " + + "negative score: %.2f.%n", sentenceSentiment.getSentiment(), sentenceSentiment.getConfidenceScores().getPositive(), sentenceSentiment.getConfidenceScores().getNeutral(), @@ -663,69 +472,6 @@ public void analyzeSentimentWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentiment#string-string } - /** - * Code snippet for {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable)} - */ - public void analyzeSentimentStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable - List 
documents = Arrays.asList( - "The hotel was dark and unclean.", "The restaurant had amazing gnocchi."); - - textAnalyticsAsyncClient.analyzeSentimentBatch(documents).byPage().subscribe(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment()); - documentSentiment.getSentences().forEach(sentenceSentiment -> - System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f, " - + "negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative())); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String)} - */ - public void analyzeSentimentStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String - List documents = Arrays.asList( - "The hotel was dark and unclean.", - "The restaurant had amazing gnocchi." - ); - - textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en").byPage().subscribe( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment()); - documentSentiment.getSentences().forEach(sentenceSentiment -> - System.out.printf("Recognized sentence sentiment: %s, positive score: %.2f, " - + "neutral score: %.2f, negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative())); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -736,14 +482,14 @@ public void analyzeSentimentStringListWithOptions() { "The restaurant had amazing gnocchi." 
); - textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en", null).byPage().subscribe( + textAnalyticsAsyncClient.analyzeSentimentBatch(documents, "en", null).subscribe( response -> { // Batch statistics TextDocumentBatchStatistics batchStatistics = response.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - response.getElements().forEach(analyzeSentimentResult -> { + response.forEach(analyzeSentimentResult -> { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment()); @@ -760,8 +506,7 @@ public void analyzeSentimentStringListWithOptions() { } /** - * Code snippet for {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, - * TextAnalyticsRequestOptions)} + * Code snippet for {@link TextAnalyticsAsyncClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ public void analyzeBatchSentimentMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions @@ -772,15 +517,19 @@ public void analyzeBatchSentimentMaxOverload() { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true); - textAnalyticsAsyncClient.analyzeSentimentBatch(textDocumentInputs1, requestOptions).byPage() + textAnalyticsAsyncClient.analyzeSentimentBatchWithResponse(textDocumentInputs1, requestOptions) .subscribe(response -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + AnalyzeSentimentResultCollection resultCollection = response.getValue(); + // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - response.getElements().forEach(analyzeSentimentResult -> { + resultCollection.forEach(analyzeSentimentResult -> { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); System.out.printf("Recognized document sentiment: %s.%n", documentSentiment.getSentiment()); @@ -795,29 +544,4 @@ public void analyzeBatchSentimentMaxOverload() { }); // END: com.azure.ai.textanalytics.TextAnalyticsAsyncClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions } - - // Text Analytics Paged flux - public void textAnalyticsPagedFluxSample() { - TextAnalyticsPagedFlux pagedFlux = - textAnalyticsAsyncClient.recognizeEntitiesBatch(Collections.singletonList("")); - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribe - pagedFlux - .log() - .subscribe( - item -> System.out.println("Processing item" + item), - error -> System.err.println("Error occurred " + error), - () -> System.out.println("Completed processing.")); - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribe - - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribeByPage - pagedFlux - .byPage() - .log() - .subscribe( - 
page -> System.out.printf("Processing page containing model version: %s, items:", - page.getModelVersion(), page.getElements()), - error -> System.err.println("Error occurred " + error), - () -> System.out.println("Completed processing.")); - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedFlux.subscribeByPage - } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsClientJavaDocCodeSnippets.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsClientJavaDocCodeSnippets.java index e6e08b0fef75..49aa84d7c940 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsClientJavaDocCodeSnippets.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/TextAnalyticsClientJavaDocCodeSnippets.java @@ -3,26 +3,27 @@ package com.azure.ai.textanalytics; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.CategorizedEntityCollection; import com.azure.ai.textanalytics.models.DetectLanguageInput; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; -import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable; import com.azure.core.credential.AzureKeyCredential; import com.azure.core.http.HttpPipeline; import com.azure.core.http.HttpPipelineBuilder; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; -import java.net.HttpURLConnection; import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; import java.util.List; /** @@ -85,58 +86,6 @@ public void detectLanguageWithCountryHint() { // END: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguage#String-String } - /** - * Code snippet for {@link TextAnalyticsClient#detectLanguageBatch(Iterable)} - */ - public void detectLanguageStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable - List documents = Arrays.asList( - "This is written in English", - "Este es un documento escrito en Español."); - - textAnalyticsClient.detectLanguageBatch(documents).iterableByPage().forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(detectLanguageResult -> { - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", - detectedLanguage.getName(), 
detectedLanguage.getIso6391Name(), detectedLanguage.getConfidenceScore()); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String)} - */ - public void detectLanguageStringListWithCountryHint() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String - List documents = Arrays.asList( - "This is written in English", - "Este es un documento escrito en Español." - ); - - textAnalyticsClient.detectLanguageBatch(documents, "US").iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch result of languages - response.getElements().forEach(detectLanguageResult -> { - System.out.printf("Document ID: %s%n", detectLanguageResult.getId()); - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -147,27 +96,27 @@ public void detectLanguageStringListWithOptions() { "Este es un documento escrito en Español." ); - textAnalyticsClient.detectLanguageBatch(documents, "US", null).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch result of languages - response.getElements().forEach(detectLanguageResult -> { - System.out.printf("Document ID: %s%n", detectLanguageResult.getId()); - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - }); - }); + DetectLanguageResultCollection resultCollection = + textAnalyticsClient.detectLanguageBatch(documents, "US", null); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Batch result of languages + resultCollection.forEach(detectLanguageResult -> { + System.out.printf("Document ID: %s%n", detectLanguageResult.getId()); + DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); + System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", + detectedLanguage.getName(), detectedLanguage.getIso6391Name(), + detectedLanguage.getConfidenceScore()); + }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-String-TextAnalyticsRequestOptions 
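The asynchronous counterpart follows the same shape; a sketch (assuming an initialized `TextAnalyticsAsyncClient`) of subscribing to the sentiment max-overload shown earlier.

```java
import com.azure.ai.textanalytics.TextAnalyticsAsyncClient;
import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions;
import com.azure.ai.textanalytics.models.TextDocumentInput;
import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection;

import java.util.Arrays;

final class AsyncSentimentSketch {
    static void run(TextAnalyticsAsyncClient asyncClient) {
        asyncClient.analyzeSentimentBatchWithResponse(
                Arrays.asList(new TextDocumentInput("0", "The restaurant had amazing gnocchi.")),
                new TextAnalyticsRequestOptions().setIncludeStatistics(true))
            .subscribe(response -> {
                AnalyzeSentimentResultCollection collection = response.getValue();
                collection.forEach(result -> System.out.printf("Document %s sentiment: %s%n",
                    result.getId(), result.getDocumentSentiment().getSentiment()));
            });
    }
}
```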
} /** - * Code snippet for {@link TextAnalyticsClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions, - Context)} + * Code snippet for {@link TextAnalyticsClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ public void detectBatchLanguagesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context @@ -176,23 +125,30 @@ public void detectBatchLanguagesMaxOverload() { new DetectLanguageInput("2", "Este es un documento escrito en Español.", "es") ); - textAnalyticsClient.detectLanguageBatch(detectLanguageInputs, - new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Batch result of languages - response.getElements().forEach(detectLanguageResult -> { - System.out.printf("Document ID: %s%n", detectLanguageResult.getId()); - DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - }); - }); + Response<DetectLanguageResultCollection> response = + textAnalyticsClient.detectLanguageBatchWithResponse(detectLanguageInputs, + new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + DetectLanguageResultCollection detectedLanguageResultCollection = response.getValue(); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics(); + System.out.printf( + "Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s," + + " valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Batch result of languages + detectedLanguageResultCollection.forEach(detectLanguageResult -> { + System.out.printf("Document ID: %s%n", detectLanguageResult.getId()); + DetectedLanguage detectedLanguage = detectLanguageResult.getPrimaryLanguage(); + System.out.printf("Primary language name: %s, ISO 6391 name: %s, confidence score: %f.%n", - detectedLanguage.getName(), detectedLanguage.getIso6391Name(), - detectedLanguage.getConfidenceScore()); - }); - }); + detectedLanguage.getName(), detectedLanguage.getIso6391Name(), + detectedLanguage.getConfidenceScore()); + }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.detectLanguageBatch#Iterable-TextAnalyticsRequestOptions-Context } @@ -227,55 +183,6 @@ public void recognizeEntitiesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntities#String-String } - /** - * Code snippet for {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable)} - */ - public void recognizeEntitiesStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable - final List<String> documents = Arrays.asList( - "I had a wonderful trip to Seattle last week.", - "I work at Microsoft."); - - textAnalyticsClient.recognizeEntitiesBatch(documents).iterableByPage().forEach(response -> { - // Batch
statistics - final TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> - System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String)} - */ - public void recognizeEntitiesStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String - List<String> documents = Arrays.asList( - "I had a wonderful trip to Seattle last week.", - "I work at Microsoft."); - - textAnalyticsClient.recognizeEntitiesBatch(documents, "en").iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> { - System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getConfidenceScore()); - })); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -285,26 +192,24 @@ public void recognizeEntitiesStringListWithOptions() { "I had a wonderful trip to Seattle last week.", "I work at Microsoft."); - textAnalyticsClient.recognizeEntitiesBatch(documents, "en", null).iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> { - System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getConfidenceScore()); - })); - }); + RecognizeEntitiesResultCollection resultCollection = + textAnalyticsClient.recognizeEntitiesBatch(documents, "en", null); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf( + "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + resultCollection.forEach(recognizeEntitiesResult -> + recognizeEntitiesResult.getEntities().forEach(entity -> + System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", + entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); // END: 
com.azure.ai.textanalytics.TextAnalyticsClient.recognizeCategorizedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions } /** - * Code snippet for {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions, - Context)} + * Code snippet for {@link TextAnalyticsClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ public void recognizeBatchEntitiesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context @@ -313,21 +218,24 @@ public void recognizeBatchEntitiesMaxOverload() { new TextDocumentInput("1", "I work at Microsoft.").setLanguage("en") ); - textAnalyticsClient.recognizeEntitiesBatch(textDocumentInputs, - new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeEntitiesResult -> - recognizeEntitiesResult.getEntities().forEach(entity -> { - System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getConfidenceScore()); - })); - }); + Response<RecognizeEntitiesResultCollection> response = + textAnalyticsClient.recognizeEntitiesBatchWithResponse(textDocumentInputs, + new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = response.getValue(); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics(); + System.out.printf( + "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + recognizeEntitiesResultCollection.forEach(recognizeEntitiesResult -> + recognizeEntitiesResult.getEntities().forEach(entity -> + System.out.printf("Recognized entity: %s, entity category: %s, confidence score: %f.%n", + entity.getText(), entity.getCategory(), entity.getConfidenceScore()))); // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context } @@ -345,7 +253,8 @@ public void recognizeLinkedEntities() { linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), linkedEntity.getDataSource()); linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", entityMatch.getText(), entityMatch.getConfidenceScore())); + "Matched entity: %s, confidence score: %f.%n", + entityMatch.getText(), entityMatch.getConfidenceScore())); }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String } @@ -367,66 +276,6 @@ public void recognizeLinkedEntitiesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntities#String-String } - /** - * Code snippet for {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable)} - */ - public void recognizeLinkedEntitiesStringList() { - // BEGIN: 
com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable - final List<String> documents = Arrays.asList( - "Old Faithful is a geyser at Yellowstone Park.", - "Mount Shasta has lenticular clouds."); - - textAnalyticsClient.recognizeLinkedEntitiesBatch(documents).iterableByPage().forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String)} - */ - public void recognizeLinkedEntitiesStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String - List<String> documents = Arrays.asList( - "Old Faithful is a geyser at Yellowstone Park.", - "Mount Shasta has lenticular clouds." - ); - - textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en").iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -437,29 +286,29 @@ public void recognizeLinkedEntitiesStringListWithOptions() { "Mount Shasta has lenticular clouds."
); - textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en", null).iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); + RecognizeLinkedEntitiesResultCollection resultCollection = + textAnalyticsClient.recognizeLinkedEntitiesBatch(documents, "en", null); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + resultCollection.forEach(recognizeLinkedEntitiesResult -> + recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { + System.out.println("Linked Entities:"); + System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + linkedEntity.getDataSource()); + linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + "Matched entity: %s, confidence score: %f.%n", + entityMatch.getText(), entityMatch.getConfidenceScore())); + })); // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-String-TextAnalyticsRequestOptions } /** - * Code snippet for {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * Code snippet for {@link TextAnalyticsClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ public void recognizeLinkedEntitiesBatchMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context @@ -468,26 +317,30 @@ public void recognizeLinkedEntitiesBatchMaxOverload() { new TextDocumentInput("2", "Mount Shasta has lenticular clouds.").setLanguage("en") ); - textAnalyticsClient.recognizeLinkedEntitiesBatch(textDocumentInputs, - new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - response.getElements().forEach(recognizeLinkedEntitiesResult -> - recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), 
linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), - linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "Matched entity: %s, confidence score: %.2f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - })); - }); + Response<RecognizeLinkedEntitiesResultCollection> response = + textAnalyticsClient.recognizeLinkedEntitiesBatchWithResponse(textDocumentInputs, + new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + RecognizeLinkedEntitiesResultCollection resultCollection = response.getValue(); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf( + "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + resultCollection.forEach(recognizeLinkedEntitiesResult -> + recognizeLinkedEntitiesResult.getEntities().forEach(linkedEntity -> { + System.out.println("Linked Entities:"); + System.out.printf("Name: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), + linkedEntity.getDataSource()); + linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + "Matched entity: %s, confidence score: %.2f.%n", + entityMatch.getText(), entityMatch.getConfidenceScore())); + })); // END: com.azure.ai.textanalytics.TextAnalyticsClient.recognizeLinkedEntitiesBatch#Iterable-TextAnalyticsRequestOptions-Context } @@ -516,64 +369,6 @@ public void extractKeyPhrasesWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrases#String-String-Context } - /** - * Code snippet for {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable)} - */ - public void extractKeyPhrasesStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable - final List<String> documents = Arrays.asList( - "My cat might need to see a veterinarian.", - "The pitot tube is used to measure airspeed." - ); - - // Extracting batch key phrases - textAnalyticsClient.extractKeyPhrasesBatch(documents).iterableByPage().forEach(response -> { - // Batch statistics - final TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Extracted key phrase for each of documents from a batch of documents - response.getElements().forEach(extractKeyPhraseResult -> { - System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String)} - */ - public void extractKeyPhrasesStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String - List<String> documents = Arrays.asList( - "My cat might need to see a veterinarian.", - "The pitot tube is used to measure airspeed."
- ); - - // Extracting batch key phrases - textAnalyticsClient.extractKeyPhrasesBatch(documents, "en").iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Extracted key phrase for each of documents from a batch of documents - response.getElements().forEach(extractKeyPhraseResult -> { - System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -585,28 +380,27 @@ public void extractKeyPhrasesStringListWithOptions() { ); // Extracting batch key phrases - textAnalyticsClient.extractKeyPhrasesBatch(documents, "en", null).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Extracted key phrase for each of documents from a batch of documents - response.getElements().forEach(extractKeyPhraseResult -> { - System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); - }); - }); + ExtractKeyPhrasesResultCollection resultCollection = + textAnalyticsClient.extractKeyPhrasesBatch(documents, "en", null); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf( + "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Extracted key phrase for each of documents from a batch of documents + resultCollection.forEach(extractKeyPhraseResult -> { + System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); + // Valid document + System.out.println("Extracted phrases:"); + extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> System.out.printf("%s.%n", keyPhrase)); + }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-String-TextAnalyticsRequestOptions } /** - * Code snippet for {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, TextAnalyticsRequestOptions, - * Context)} + * Code snippet for {@link TextAnalyticsClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ public void extractBatchKeyPhrasesMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context @@ -616,24 +410,29 @@ public void extractBatchKeyPhrasesMaxOverload() { ); // Extracting batch key phrases - textAnalyticsClient.extractKeyPhrasesBatch(textDocumentInputs, - new TextAnalyticsRequestOptions().setIncludeStatistics(true), 
Context.NONE).iterableByPage().forEach( - response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Extracted key phrase for each of documents from a batch of documents - response.getElements().forEach(extractKeyPhraseResult -> { - System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> - System.out.printf("%s.%n", keyPhrase)); - }); - }); + Response<ExtractKeyPhrasesResultCollection> response = + textAnalyticsClient.extractKeyPhrasesBatchWithResponse(textDocumentInputs, + new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + + + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + ExtractKeyPhrasesResultCollection resultCollection = response.getValue(); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf( + "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Extracted key phrase for each of documents from a batch of documents + resultCollection.forEach(extractKeyPhraseResult -> { + System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); + // Valid document + System.out.println("Extracted phrases:"); + extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrase -> + System.out.printf("%s.%n", keyPhrase)); + }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.extractKeyPhrasesBatch#Iterable-TextAnalyticsRequestOptions-Context } @@ -647,7 +446,8 @@ public void analyzeSentiment() { final DocumentSentiment documentSentiment = textAnalyticsClient.analyzeSentiment("The hotel was dark and unclean."); - System.out.printf("Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", + System.out.printf( + "Recognized sentiment: %s, positive score: %.2f, neutral score: %.2f, negative score: %.2f.%n", documentSentiment.getSentiment(), documentSentiment.getConfidenceScores().getPositive(), documentSentiment.getConfidenceScores().getNeutral(), @@ -690,90 +490,6 @@ public void analyzeSentimentWithLanguage() { // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentiment#String-String } - /** - * Code snippet for {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable)} - */ - public void analyzeSentimentStringList() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable - final List<String> documents = Arrays.asList( - "The hotel was dark and unclean. The restaurant had amazing gnocchi.", - "The restaurant had amazing gnocchi. The hotel was dark and unclean."
- ); - - // Analyzing batch sentiments - textAnalyticsClient.analyzeSentimentBatch(documents).iterableByPage().forEach(response -> { - // Batch statistics - final TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf( - "A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Analyzed sentiment for each of documents from a batch of documents - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - // Valid document - final DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf( - "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, " - + "negative score: %.2f.%n", - documentSentiment.getSentiment(), - documentSentiment.getConfidenceScores().getPositive(), - documentSentiment.getConfidenceScores().getNeutral(), - documentSentiment.getConfidenceScores().getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf( - "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative())); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable - } - - /** - * Code snippet for {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String)} - */ - public void analyzeSentimentStringListWithLanguageCode() { - // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String - List<String> documents = Arrays.asList( - "The hotel was dark and unclean. The restaurant had amazing gnocchi.", - "The restaurant had amazing gnocchi. The hotel was dark and unclean."
- ); - - // Analyzing batch sentiments - textAnalyticsClient.analyzeSentimentBatch(documents, "en").iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Analyzed sentiment for each of documents from a batch of documents - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - // Valid document - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf( - "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - documentSentiment.getSentiment(), - documentSentiment.getConfidenceScores().getPositive(), - documentSentiment.getConfidenceScores().getNeutral(), - documentSentiment.getConfidenceScores().getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf( - "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative())); - }); - }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String - } - /** * Code snippet for {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @@ -785,117 +501,85 @@ public void analyzeSentimentStringListWithOptions() { ); // Analyzing batch sentiments - textAnalyticsClient.analyzeSentimentBatch(documents, "en", null).iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Analyzed sentiment for each of documents from a batch of documents - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - // Valid document - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf( - "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - documentSentiment.getSentiment(), - documentSentiment.getConfidenceScores().getPositive(), - documentSentiment.getConfidenceScores().getNeutral(), - documentSentiment.getConfidenceScores().getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf( - "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative())); - }); - }); + AnalyzeSentimentResultCollection resultCollection = + textAnalyticsClient.analyzeSentimentBatch(documents, "en", null); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + 
System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Analyzed sentiment for each of documents from a batch of documents + resultCollection.forEach(analyzeSentimentResult -> { + System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); + // Valid document + DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); + System.out.printf( + "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f," + + " negative score: %.2f.%n", + documentSentiment.getSentiment(), + documentSentiment.getConfidenceScores().getPositive(), + documentSentiment.getConfidenceScores().getNeutral(), + documentSentiment.getConfidenceScores().getNegative()); + documentSentiment.getSentences().forEach(sentenceSentiment -> System.out.printf( + "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," + + " negative score: %.2f.%n", + sentenceSentiment.getSentiment(), + sentenceSentiment.getConfidenceScores().getPositive(), + sentenceSentiment.getConfidenceScores().getNeutral(), + sentenceSentiment.getConfidenceScores().getNegative())); + }); // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-String-TextAnalyticsRequestOptions } /** - * Code snippet for {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * Code snippet for {@link TextAnalyticsClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ public void analyzeBatchSentimentMaxOverload() { // BEGIN: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context List<TextDocumentInput> textDocumentInputs = Arrays.asList( - new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.").setLanguage("en"), - new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.").setLanguage("en") + new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.") + .setLanguage("en"), + new TextDocumentInput("2", "The restaurant had amazing gnocchi. 
The hotel was dark and unclean.") + .setLanguage("en") ); // Analyzing batch sentiments - textAnalyticsClient.analyzeSentimentBatch(textDocumentInputs, - new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE).iterableByPage() - .forEach(response -> { - // Batch statistics - TextDocumentBatchStatistics batchStatistics = response.getStatistics(); - System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", - batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - - // Analyzed sentiment for each of documents from a batch of documents - response.getElements().forEach(analyzeSentimentResult -> { - System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); - // Valid document - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - System.out.printf( - "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, " - + "negative score: %.2f.%n", - documentSentiment.getSentiment(), - documentSentiment.getConfidenceScores().getPositive(), - documentSentiment.getConfidenceScores().getNeutral(), - documentSentiment.getConfidenceScores().getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> { - System.out.printf( - "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," - + " negative score: %.2f.%n", - sentenceSentiment.getSentiment(), - sentenceSentiment.getConfidenceScores().getPositive(), - sentenceSentiment.getConfidenceScores().getNeutral(), - sentenceSentiment.getConfidenceScores().getNegative()); - }); - }); + Response<AnalyzeSentimentResultCollection> response = + textAnalyticsClient.analyzeSentimentBatchWithResponse(textDocumentInputs, + new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", response.getStatusCode()); + AnalyzeSentimentResultCollection resultCollection = response.getValue(); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = resultCollection.getStatistics(); + System.out.printf("A batch of documents statistics, transaction count: %s, valid document count: %s.%n", + batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Analyzed sentiment for each of documents from a batch of documents + resultCollection.forEach(analyzeSentimentResult -> { + System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); + // Valid document + DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); + System.out.printf( + "Recognized document sentiment: %s, positive score: %.2f, neutral score: %.2f, " + + "negative score: %.2f.%n", + documentSentiment.getSentiment(), + documentSentiment.getConfidenceScores().getPositive(), + documentSentiment.getConfidenceScores().getNeutral(), + documentSentiment.getConfidenceScores().getNegative()); + documentSentiment.getSentences().forEach(sentenceSentiment -> { + System.out.printf( + "Recognized sentence sentiment: %s, positive score: %.2f, neutral score: %.2f," + + " negative score: %.2f.%n", + sentenceSentiment.getSentiment(), + sentenceSentiment.getConfidenceScores().getPositive(), + sentenceSentiment.getConfidenceScores().getNeutral(), + sentenceSentiment.getConfidenceScores().getNegative()); }); - // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context - } - - /** - * Code snippet for {@link
TextAnalyticsPagedIterable} - */ - public void textAnalyticsPagedIterableSample() { - TextAnalyticsPagedIterable<RecognizeLinkedEntitiesResult> pagedIterable = - textAnalyticsClient.recognizeLinkedEntitiesBatch(Collections.singleton("")); - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.stream - pagedIterable.stream().forEach(item -> System.out.println("Processing item" + item)); - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.stream - - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterator - Iterator<RecognizeLinkedEntitiesResult> iterator = pagedIterable.iterator(); - while (iterator.hasNext()) { - System.out.println("Processing item" + iterator.next()); - } - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterator - - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.streamByPage - pagedIterable.streamByPage().forEach(resp -> { - if (resp.getStatusCode() == HttpURLConnection.HTTP_OK) { - System.out.printf("Response headers are %s. Url %s%n", resp.getDeserializedHeaders(), - resp.getRequest().getUrl()); - resp.getElements().forEach(value -> System.out.printf("Response value is %s%n", value)); - } }); - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.streamByPage - - // BEGIN: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterableByPage - pagedIterable.iterableByPage().forEach(resp -> { - if (resp.getStatusCode() == HttpURLConnection.HTTP_OK) { - System.out.printf("Response headers are %s. Url %s%n", resp.getDeserializedHeaders(), - resp.getRequest().getUrl()); - resp.getElements().forEach(value -> System.out.printf("Response value is %s%n", value)); - } - }); - // END: com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable.iterableByPage + // END: com.azure.ai.textanalytics.TextAnalyticsClient.analyzeSentimentBatch#Iterable-TextAnalyticsRequestOptions-Context } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocuments.java index aa25ed9ae8e4..3c42c3da9531 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocuments.java @@ -5,14 +5,14 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; -import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Arrays; @@ -46,37 +46,40 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Analyzing sentiment for each document in a batch of documents - Iterable<TextAnalyticsPagedResponse<AnalyzeSentimentResult>>
sentimentBatchResult = - client.analyzeSentimentBatch(documents, requestOptions, Context.NONE).iterableByPage(); + Response<AnalyzeSentimentResultCollection> sentimentBatchResultResponse = + client.analyzeSentimentBatchWithResponse(documents, requestOptions, Context.NONE); - sentimentBatchResult.forEach(pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", pagedResponse.getModelVersion()); + // Response's status code + System.out.printf("Status code of request response: %d%n", sentimentBatchResultResponse.getStatusCode()); + AnalyzeSentimentResultCollection sentimentBatchResultCollection = sentimentBatchResultResponse.getValue(); - // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); - System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", - batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Model version + System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", sentimentBatchResultCollection.getModelVersion()); - // Analyzed sentiment for each document in a batch of documents - AtomicInteger counter = new AtomicInteger(); - for (AnalyzeSentimentResult analyzeSentimentResult : pagedResponse.getElements()) { - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - if (analyzeSentimentResult.isError()) { - // Erroneous document - System.out.printf("Cannot analyze sentiment. Error: %s%n", analyzeSentimentResult.getError().getMessage()); - } else { - // Valid document - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - SentimentConfidenceScores scores = documentSentiment.getConfidenceScores(); - System.out.printf("Analyzed document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", - documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> { - SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores(); - System.out.printf( - "\tAnalyzed sentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", - sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative()); - }); - } + // Batch statistics + TextDocumentBatchStatistics batchStatistics = sentimentBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Analyzed sentiment for each document in a batch of documents + AtomicInteger counter = new AtomicInteger(); + sentimentBatchResultCollection.forEach(analyzeSentimentResult -> { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + if (analyzeSentimentResult.isError()) { + // Erroneous document + System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); + } else { + // Valid document + DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); + SentimentConfidenceScores scores = documentSentiment.getConfidenceScores(); + System.out.printf("Analyzed document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", + documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative()); + documentSentiment.getSentences().forEach(sentenceSentiment -> { + SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores(); + System.out.printf( + "\tAnalyzed sentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", + sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative()); + }); } }); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocumentsAsync.java index c92228592995..ccd0dca709b1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchDocumentsAsync.java @@ -6,6 +6,7 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; @@ -45,18 +46,22 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Analyzing sentiment for each document in a batch of documents - client.analyzeSentimentBatch(documents, requestOptions).byPage().subscribe( - pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", pagedResponse.getModelVersion()); + client.analyzeSentimentBatchWithResponse(documents, requestOptions).subscribe( + sentimentBatchResultResponse -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", sentimentBatchResultResponse.getStatusCode()); + AnalyzeSentimentResultCollection sentimentBatchResultCollection = sentimentBatchResultResponse.getValue(); + + System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", sentimentBatchResultCollection.getModelVersion()); // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); + TextDocumentBatchStatistics batchStatistics = sentimentBatchResultCollection.getStatistics(); System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Analyzed sentiment for each document in a batch of documents AtomicInteger 
counter = new AtomicInteger(); - for (AnalyzeSentimentResult analyzeSentimentResult : pagedResponse.getElements()) { + for (AnalyzeSentimentResult analyzeSentimentResult : sentimentBatchResultCollection) { System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); if (analyzeSentimentResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocuments.java index 2d5423f034db..98acd6acde4c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocuments.java @@ -8,6 +8,9 @@ import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -37,9 +40,23 @@ public static void main(String[] args) { "The hotel was dark and unclean. The restaurant had amazing gnocchi!" ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + + // Analyzing sentiment for each document in a batch of documents + AnalyzeSentimentResultCollection sentimentBatchResultCollection = client.analyzeSentimentBatch(documents, "en", requestOptions); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", sentimentBatchResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = sentimentBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Analyzed sentiment for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - for (AnalyzeSentimentResult analyzeSentimentResult : client.analyzeSentimentBatch(documents, "en")) { + for (AnalyzeSentimentResult analyzeSentimentResult : sentimentBatchResultCollection) { System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); if (analyzeSentimentResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocumentsAsync.java index 2c3e2a32eb1d..0bd7d96570b1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocumentsAsync.java +++ 
b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/AnalyzeSentimentBatchStringDocumentsAsync.java @@ -5,8 +5,11 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; +import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -38,27 +41,40 @@ public static void main(String[] args) { "The hotel was dark and unclean. The restaurant had amazing gnocchi!" ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Analyzing sentiment for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.analyzeSentimentBatch(documents, "en").subscribe( - analyzeSentimentResult -> { - // Analyzed sentiment for each document - System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); - if (analyzeSentimentResult.isError()) { - // Erroneous document - System.out.printf("Cannot analyze sentiment. Error: %s%n", analyzeSentimentResult.getError().getMessage()); - } else { - // Valid document - DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); - SentimentConfidenceScores scores = documentSentiment.getConfidenceScores(); - System.out.printf("Analyzed document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", - documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative()); - documentSentiment.getSentences().forEach(sentenceSentiment -> { - SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores(); - System.out.printf( - "\tAnalyzed sentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", - sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative()); - }); + client.analyzeSentimentBatch(documents, "en", requestOptions).subscribe( + sentimentBatchResultCollection -> { + System.out.printf("Results of Azure Text Analytics \"Sentiment Analysis\" Model, version: %s%n", sentimentBatchResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = sentimentBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Analyzed sentiment for each document in a batch of documents + for (AnalyzeSentimentResult analyzeSentimentResult : sentimentBatchResultCollection) { + // Analyzed sentiment for each document + System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); + if (analyzeSentimentResult.isError()) { + // Erroneous document + System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); + } else { + // Valid document + DocumentSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); + SentimentConfidenceScores scores = documentSentiment.getConfidenceScores(); + System.out.printf("Analyzed document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", + documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative()); + documentSentiment.getSentences().forEach(sentenceSentiment -> { + SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores(); + System.out.printf( + "\tAnalyzed sentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n", + sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative()); + }); + } } }, error -> System.err.println("There was an error analyzing sentiment of the documents." + error), diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocuments.java index b3c73f2d635f..c4e277ba854c 100--- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocuments.java @@ -6,12 +6,12 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.DetectLanguageInput; -import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Arrays; @@ -44,29 +44,32 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Detecting language for each document in a batch of documents - Iterable<TextAnalyticsPagedResponse<DetectLanguageResult>> detectedLanguageBatchResult = client.detectLanguageBatch(documents, requestOptions, Context.NONE).iterableByPage(); + Response<DetectLanguageResultCollection> detectedLanguageResultResponse = client.detectLanguageBatchWithResponse(documents, requestOptions, Context.NONE); - detectedLanguageBatchResult.forEach(pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", pagedResponse.getModelVersion()); + // Response's status code + System.out.printf("Status code of request response: %d%n", detectedLanguageResultResponse.getStatusCode()); + DetectLanguageResultCollection detectedLanguageResultCollection = detectedLanguageResultResponse.getValue(); - // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); - System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", - batchStatistics.getDocumentCount(),
batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Model version + System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", detectedLanguageResultCollection.getModelVersion()); - // Detected language for each document in a batch of documents - AtomicInteger counter = new AtomicInteger(); - for (DetectLanguageResult detectLanguageResult : pagedResponse.getElements()) { - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - if (detectLanguageResult.isError()) { - // Erroneous document - System.out.printf("Cannot detect language. Error: %s%n", detectLanguageResult.getError().getMessage()); - } else { - // Valid document - DetectedLanguage language = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Detected primary language: %s, ISO 6391 name: %s, confidence score: %f.%n", - language.getName(), language.getIso6391Name(), language.getConfidenceScore()); - } + // Batch statistics + TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Detected language for each document in a batch of documents + AtomicInteger counter = new AtomicInteger(); + detectedLanguageResultCollection.forEach(detectLanguageResult -> { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + if (detectLanguageResult.isError()) { + // Erroneous document + System.out.printf("Cannot detect language. 
Error: %s%n", detectLanguageResult.getError().getMessage()); + } else { + // Valid document + DetectedLanguage language = detectLanguageResult.getPrimaryLanguage(); + System.out.printf("Detected primary language: %s, ISO 6391 name: %s, confidence score: %f.%n", + language.getName(), language.getIso6391Name(), language.getConfidenceScore()); } }); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocumentsAsync.java index 69e8bf721928..4bf6c111fbdf 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchDocumentsAsync.java @@ -7,6 +7,7 @@ import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.DetectLanguageInput; import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; @@ -43,18 +44,22 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Detecting language for each document in a batch of documents - client.detectLanguageBatch(documents, requestOptions).byPage().subscribe( - pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", pagedResponse.getModelVersion()); + client.detectLanguageBatchWithResponse(documents, requestOptions).subscribe( + detectedLanguageResultResponse -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", detectedLanguageResultResponse.getStatusCode()); + DetectLanguageResultCollection detectedLanguageResultCollection = detectedLanguageResultResponse.getValue(); + + System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", detectedLanguageResultCollection.getModelVersion()); // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); + TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics(); System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Detected language for each document in a batch of documents\ AtomicInteger counter = new AtomicInteger(); - for (DetectLanguageResult detectLanguageResult : pagedResponse.getElements()) { + for (DetectLanguageResult detectLanguageResult : detectedLanguageResultCollection) { System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); if (detectLanguageResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocuments.java 
b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocuments.java index 4dc289feb2af..a94b6ec7d52e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocuments.java @@ -5,7 +5,11 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; +import com.azure.ai.textanalytics.models.DetectLanguageResult; import com.azure.ai.textanalytics.models.DetectedLanguage; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,10 +38,21 @@ public static void main(String[] args) { "Este es un documento escrito en Español." ); - // Detecting language for each document in a batch of documents + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + DetectLanguageResultCollection detectedLanguageResultCollection = client.detectLanguageBatch(documents, "US", requestOptions); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", detectedLanguageResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Detected language for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.detectLanguageBatch(documents, "US").forEach(detectLanguageResult -> { - // Detected language for each document + for (DetectLanguageResult detectLanguageResult : detectedLanguageResultCollection) { System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); if (detectLanguageResult.isError()) { // Erroneous document @@ -48,6 +63,6 @@ public static void main(String[] args) { System.out.printf("Detected primary language: %s, ISO 6391 name: %s, confidence score: %f.%n", language.getName(), language.getIso6391Name(), language.getConfidenceScore()); } - }); + } } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocumentsAsync.java index dd434550f2b8..169c09495db2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/DetectLanguageBatchStringDocumentsAsync.java @@ -5,7 +5,10 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; 
+import com.azure.ai.textanalytics.models.DetectLanguageResult; import com.azure.ai.textanalytics.models.DetectedLanguage; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -35,20 +38,32 @@ public static void main(String[] args) { "Este es un documento escrito en Español." ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Detecting language for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.detectLanguageBatch(documents, "US").subscribe( - detectLanguageResult -> { - // Detected language for each document - System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); - if (detectLanguageResult.isError()) { - // Erroneous document - System.out.printf("Cannot detect language. Error: %s%n", detectLanguageResult.getError().getMessage()); - } else { - // Valid document - DetectedLanguage language = detectLanguageResult.getPrimaryLanguage(); - System.out.printf("Detected primary language: %s, ISO 6391 name: %s, confidence score: %f.%n", - language.getName(), language.getIso6391Name(), language.getConfidenceScore()); + client.detectLanguageBatch(documents, "US", requestOptions).subscribe( + detectedLanguageResultCollection -> { + System.out.printf("Results of Azure Text Analytics \"Language Detection\" Model, version: %s%n", detectedLanguageResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = detectedLanguageResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Detected language for each document in a batch of documents + for (DetectLanguageResult detectLanguageResult : detectedLanguageResultCollection) { + System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); + if (detectLanguageResult.isError()) { + // Erroneous document + System.out.printf("Cannot detect language. Error: %s%n", detectLanguageResult.getError().getMessage()); + } else { + // Valid document + DetectedLanguage language = detectLanguageResult.getPrimaryLanguage(); + System.out.printf("Detected primary language: %s, ISO 6391 name: %s, confidence score: %f.%n", + language.getName(), language.getIso6391Name(), language.getConfidenceScore()); + } } }, error -> System.err.println("There was an error detecting language of the documents." 
+ error), diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocuments.java index e6d1ab673340..8bed3ef2b1a5 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocuments.java @@ -5,12 +5,12 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; -import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Arrays; @@ -42,30 +42,33 @@ public static void main(String[] args) { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); - Iterable<TextAnalyticsPagedResponse<ExtractKeyPhraseResult>> keyPhrasesBatchResult = - client.extractKeyPhrasesBatch(documents, requestOptions, Context.NONE).iterableByPage(); - // Extracting key phrases for each document in a batch of documents - keyPhrasesBatchResult.forEach(pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", pagedResponse.getModelVersion()); + Response<ExtractKeyPhrasesResultCollection> keyPhrasesBatchResultResponse = + client.extractKeyPhrasesBatchWithResponse(documents, requestOptions, Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", keyPhrasesBatchResultResponse.getStatusCode()); + ExtractKeyPhrasesResultCollection keyPhrasesBatchResultCollection = keyPhrasesBatchResultResponse.getValue(); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", keyPhrasesBatchResultCollection.getModelVersion()); - // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); - System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", - batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Batch statistics + TextDocumentBatchStatistics batchStatistics = keyPhrasesBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); - // Extracted key phrases for each document in a batch of documents - AtomicInteger counter = new AtomicInteger(); - for (ExtractKeyPhraseResult extractKeyPhraseResult : pagedResponse.getElements()) { - System.out.printf("%n%s%n",
documents.get(counter.getAndIncrement())); - if (extractKeyPhraseResult.isError()) { - // Erroneous document - System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); - } else { - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); - } + // Extracted key phrases for each document in a batch of documents + AtomicInteger counter = new AtomicInteger(); + keyPhrasesBatchResultCollection.forEach(extractKeyPhraseResult -> { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + if (extractKeyPhraseResult.isError()) { + // Erroneous document + System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); + } else { + // Valid document + System.out.println("Extracted phrases:"); + extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); } }); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocumentsAsync.java index fe6ba490feec..2962aa4ae656 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchDocumentsAsync.java @@ -6,6 +6,7 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; @@ -42,18 +43,23 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Extracting key phrases for each document in a batch of documents - client.extractKeyPhrasesBatch(documents, requestOptions).byPage().subscribe( - pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", pagedResponse.getModelVersion()); + client.extractKeyPhrasesBatchWithResponse(documents, requestOptions).subscribe( + keyPhrasesBatchResultResponse -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", keyPhrasesBatchResultResponse.getStatusCode()); + ExtractKeyPhrasesResultCollection keyPhrasesBatchResultCollection = keyPhrasesBatchResultResponse.getValue(); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", keyPhrasesBatchResultCollection.getModelVersion()); // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); + TextDocumentBatchStatistics batchStatistics = keyPhrasesBatchResultCollection.getStatistics(); System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", batchStatistics.getDocumentCount(), 
batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Extracted key phrase for each of documents from a batch of documents AtomicInteger counter = new AtomicInteger(); - for (ExtractKeyPhraseResult extractKeyPhraseResult : pagedResponse.getElements()) { + for (ExtractKeyPhraseResult extractKeyPhraseResult : keyPhrasesBatchResultCollection) { System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); if (extractKeyPhraseResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocuments.java index 1ba940219ca7..0ed44612d6cf 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocuments.java @@ -6,6 +6,9 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,10 +37,23 @@ public static void main(String[] args) { "The pitot tube is used to measure airspeed." ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Extracting key phrases for each document in a batch of documents + ExtractKeyPhrasesResultCollection keyPhrasesBatchResultCollection = client.extractKeyPhrasesBatch(documents, "en", requestOptions); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", keyPhrasesBatchResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = keyPhrasesBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Extracted key phrases for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - for (ExtractKeyPhraseResult extractKeyPhraseResult : client.extractKeyPhrasesBatch(documents, "en")) { - // Extracted key phrase for each document in a batch of documents + for (ExtractKeyPhraseResult extractKeyPhraseResult : keyPhrasesBatchResultCollection) { System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); if (extractKeyPhraseResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocumentsAsync.java 
b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocumentsAsync.java index 6bc3fe4e8543..b79a16b2b4c1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/ExtractKeyPhrasesBatchStringDocumentsAsync.java @@ -5,6 +5,9 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; +import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,18 +37,32 @@ public static void main(String[] args) { "The pitot tube is used to measure airspeed." ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Extracting key phrases for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.extractKeyPhrasesBatch(documents, "en").subscribe( - extractKeyPhraseResult -> { - System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); - if (extractKeyPhraseResult.isError()) { - // Erroneous document - System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); - } else { - // Valid document - System.out.println("Extracted phrases:"); - extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); + client.extractKeyPhrasesBatch(documents, "en", requestOptions).subscribe( + keyPhrasesBatchResultCollection -> { + // Model version + System.out.printf("Results of Azure Text Analytics \"Key Phrases Extraction\" Model, version: %s%n", keyPhrasesBatchResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = keyPhrasesBatchResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Extracted key phrases for each document in a batch of documents + for (ExtractKeyPhraseResult extractKeyPhraseResult : keyPhrasesBatchResultCollection) { + System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); + if (extractKeyPhraseResult.isError()) { + // Erroneous document + System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); + } else { + // Valid document + System.out.println("Extracted phrases:"); + extractKeyPhraseResult.getKeyPhrases().forEach(keyPhrases -> System.out.printf("\t%s.%n", keyPhrases)); + } } }, error -> System.err.println("There was an error extracting key phrases of the documents." 
+ error), diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocuments.java index 24d2b4b3019c..a5c447050d78 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocuments.java @@ -6,11 +6,12 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Arrays; @@ -42,34 +43,37 @@ public static void main(String[] args) { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); - Iterable<TextAnalyticsPagedResponse<RecognizeEntitiesResult>> entitiesBatchResult = - client.recognizeEntitiesBatch(documents, requestOptions, Context.NONE).iterableByPage(); - // Recognizing entities for each document in a batch of documents - entitiesBatchResult.forEach(pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", pagedResponse.getModelVersion()); + Response<RecognizeEntitiesResultCollection> entitiesBatchResultResponse = + client.recognizeEntitiesBatchWithResponse(documents, requestOptions, Context.NONE); + + // Response's status code + System.out.printf("Status code of request response: %d%n", entitiesBatchResultResponse.getStatusCode()); + RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = entitiesBatchResultResponse.getValue(); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); - // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); - System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", - batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Batch statistics + TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Recognized entities for each document in a batch of documents + AtomicInteger counter = new AtomicInteger(); + for (RecognizeEntitiesResult entitiesResult : recognizeEntitiesResultCollection) { // Recognized entities for each document
in a batch of documents - AtomicInteger counter = new AtomicInteger(); - for (RecognizeEntitiesResult entitiesResult : pagedResponse.getElements()) { - // Recognized entities for each document in a batch of documents - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - if (entitiesResult.isError()) { - // Erroneous document - System.out.printf("Cannot recognize entities. Error: %s%n", entitiesResult.getError().getMessage()); - } else { - // Valid document - entitiesResult.getEntities().forEach(entity -> System.out.printf( - "Recognized entity: %s, entity category: %s, entity subcategory: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()) - ); - } + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + if (entitiesResult.isError()) { + // Erroneous document + System.out.printf("Cannot recognize entities. Error: %s%n", entitiesResult.getError().getMessage()); + } else { + // Valid document + entitiesResult.getEntities().forEach(entity -> System.out.printf( + "Recognized entity: %s, entity category: %s, entity subcategory: %s, confidence score: %f.%n", + entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore()) + ); } - }); + } } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocumentsAsync.java index be80414355f2..1dcaef79e3e8 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchDocumentsAsync.java @@ -6,6 +6,7 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; @@ -42,18 +43,23 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Recognizing entities for each document in a batch of documents - client.recognizeEntitiesBatch(documents, requestOptions).byPage().subscribe( - pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", pagedResponse.getModelVersion()); + client.recognizeEntitiesBatchWithResponse(documents, requestOptions).subscribe( + entitiesBatchResultResponse -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", entitiesBatchResultResponse.getStatusCode()); + RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = entitiesBatchResultResponse.getValue(); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); + TextDocumentBatchStatistics 
batchStatistics = recognizeEntitiesResultCollection.getStatistics(); System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Recognized entities for each of documents from a batch of documents AtomicInteger counter = new AtomicInteger(); - for (RecognizeEntitiesResult entitiesResult : pagedResponse.getElements()) { + for (RecognizeEntitiesResult entitiesResult : recognizeEntitiesResultCollection) { System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); if (entitiesResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocuments.java index adc289682058..4f92a32f0a7f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocuments.java @@ -6,6 +6,9 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,9 +37,23 @@ public static void main(String[] args) { "Elon Musk is the CEO of SpaceX and Tesla." 
); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Recognizing entities for each document in a batch of documents + RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatch(documents, "en", requestOptions); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Recognized entities for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - for (RecognizeEntitiesResult entitiesResult : client.recognizeEntitiesBatch(documents, "en")) { + for (RecognizeEntitiesResult entitiesResult : recognizeEntitiesResultCollection) { // Recognized entities for each of documents from a batch of documents System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); if (entitiesResult.isError()) { diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocumentsAsync.java index e275eb944cbf..d75d01eaf27b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeEntitiesBatchStringDocumentsAsync.java @@ -5,6 +5,9 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; +import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,19 +37,32 @@ public static void main(String[] args) { "Elon Musk is the CEO of SpaceX and Tesla." ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Recognizing entities for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.recognizeEntitiesBatch(documents, "en").subscribe( - entitiesResult -> { - System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); - if (entitiesResult.isError()) { - // Erroneous document - System.out.printf("Cannot recognize entities. 
Error: %s%n", entitiesResult.getError().getMessage()); - } else { - // Valid document - entitiesResult.getEntities().forEach(entity -> System.out.printf( - "Recognized entity: %s, entity category: %s, entity subcategory: %s, confidence score: %f.%n", - entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + client.recognizeEntitiesBatch(documents, "en", requestOptions).subscribe( + recognizeEntitiesResultCollection -> { + // Model version + System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + for (RecognizeEntitiesResult entitiesResult : recognizeEntitiesResultCollection) { + System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); + if (entitiesResult.isError()) { + // Erroneous document + System.out.printf("Cannot recognize entities. Error: %s%n", entitiesResult.getError().getMessage()); + } else { + // Valid document + entitiesResult.getEntities().forEach(entity -> System.out.printf( + "Recognized entity: %s, entity category: %s, entity subcategory: %s, confidence score: %f.%n", + entity.getText(), entity.getCategory(), entity.getSubcategory(), entity.getConfidenceScore())); + } } }, error -> System.err.println("There was an error recognizing entities of the documents." 
+ error), diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocuments.java index 07cf4636dc09..7e42e74511e7 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocuments.java @@ -6,11 +6,12 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.credential.AzureKeyCredential; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import java.util.Arrays; @@ -42,37 +43,39 @@ public static void main(String[] args) { // Request options: show statistics and model version TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); - Iterable<TextAnalyticsPagedResponse<RecognizeLinkedEntitiesResult>> linkedEntitiesBatchResult = - client.recognizeLinkedEntitiesBatch(documents, requestOptions, Context.NONE).iterableByPage(); + Response<RecognizeLinkedEntitiesResultCollection> linkedEntitiesBatchResultResponse = + client.recognizeLinkedEntitiesBatchWithResponse(documents, requestOptions, Context.NONE); - // Recognizing linked entities for each document in a batch of documents - linkedEntitiesBatchResult.forEach(pagedResponse -> { - System.out.printf("Results of Azure Text Analytics \"Linked Entities Recognition\" Model, version: %s%n", pagedResponse.getModelVersion()); + // Response's status code + System.out.printf("Status code of request response: %d%n", linkedEntitiesBatchResultResponse.getStatusCode()); + RecognizeLinkedEntitiesResultCollection linkedEntitiesResultCollection = linkedEntitiesBatchResultResponse.getValue(); - // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); - System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", - batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + // Model version + System.out.printf("Results of Azure Text Analytics \"Linked Entities Recognition\" Model, version: %s%n", linkedEntitiesResultCollection.getModelVersion()); - AtomicInteger counter = new AtomicInteger(); - for (RecognizeLinkedEntitiesResult entitiesResult : pagedResponse.getElements()) { - // Recognized linked entities from documents - System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); - if (entitiesResult.isError()) { - // Erroneous document - System.out.printf("Cannot recognize linked entities.
Error: %s%n", entitiesResult.getError().getMessage()); - } else { - // Valid document - entitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("\tName: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "\tMatched entity: %s, confidence score: %f.%n", - entityMatch.getText(), entityMatch.getConfidenceScore())); - }); - } + // Batch statistics + TextDocumentBatchStatistics batchStatistics = linkedEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Recognized linked entities for each document in a batch of documents + AtomicInteger counter = new AtomicInteger(); + for (RecognizeLinkedEntitiesResult entitiesResult : linkedEntitiesResultCollection) { + System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); + if (entitiesResult.isError()) { + // Erroneous document + System.out.printf("Cannot recognize linked entities. Error: %s%n", entitiesResult.getError().getMessage()); + } else { + // Valid document + entitiesResult.getEntities().forEach(linkedEntity -> { + System.out.println("Linked Entities:"); + System.out.printf("\tName: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), linkedEntity.getDataSource()); + linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + "\tMatched entity: %s, confidence score: %f.%n", + entityMatch.getText(), entityMatch.getConfidenceScore())); + }); } - }); + } } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocumentsAsync.java index 0f2266ba5067..b7766e442b85 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchDocumentsAsync.java @@ -6,6 +6,7 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; @@ -42,18 +43,23 @@ public static void main(String[] args) { TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); // Recognizing linked entities for each document in a batch of documents - client.recognizeLinkedEntitiesBatch(documents, requestOptions).byPage().subscribe( - pagedResponse -> { - System.out.printf("Results of Azure Text Analytics 
\"Linked Entities Recognition\" Model, version: %s%n", pagedResponse.getModelVersion()); + client.recognizeLinkedEntitiesBatchWithResponse(documents, requestOptions).subscribe( + linkedEntitiesBatchResultResponse -> { + // Response's status code + System.out.printf("Status code of request response: %d%n", linkedEntitiesBatchResultResponse.getStatusCode()); + RecognizeLinkedEntitiesResultCollection linkedEntitiesResultCollection = linkedEntitiesBatchResultResponse.getValue(); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Linked Entities Recognition\" Model, version: %s%n", linkedEntitiesResultCollection.getModelVersion()); // Batch statistics - TextDocumentBatchStatistics batchStatistics = pagedResponse.getStatistics(); + TextDocumentBatchStatistics batchStatistics = linkedEntitiesResultCollection.getStatistics(); System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); // Recognized linked entities from a batch of documents AtomicInteger counter = new AtomicInteger(); - for (RecognizeLinkedEntitiesResult entitiesResult : pagedResponse.getElements()) { + for (RecognizeLinkedEntitiesResult entitiesResult : linkedEntitiesResultCollection) { System.out.printf("%n%s%n", documents.get(counter.getAndIncrement())); if (entitiesResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocuments.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocuments.java index 93852ec8e9fd..3ec69a623e6b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocuments.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocuments.java @@ -6,6 +6,9 @@ import com.azure.ai.textanalytics.TextAnalyticsClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,10 +37,23 @@ public static void main(String[] args) { "Mount Shasta has lenticular clouds." 
); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Recognizing linked entities for each document in a batch of documents + RecognizeLinkedEntitiesResultCollection linkedEntitiesResultCollection = client.recognizeLinkedEntitiesBatch(documents, "en", requestOptions); + + // Model version + System.out.printf("Results of Azure Text Analytics \"Linked Entities Recognition\" Model, version: %s%n", linkedEntitiesResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = linkedEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Recognized linked entities from a batch of documents AtomicInteger counter = new AtomicInteger(); - for (RecognizeLinkedEntitiesResult entitiesResult : client.recognizeLinkedEntitiesBatch(documents, "en")) { - // Recognized linked entities from a batch of documents + for (RecognizeLinkedEntitiesResult entitiesResult : linkedEntitiesResultCollection) { System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); if (entitiesResult.isError()) { // Erroneous document diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocumentsAsync.java b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocumentsAsync.java index 04448fa31b9f..1bb07827b824 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocumentsAsync.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/batch/RecognizeLinkedEntitiesBatchStringDocumentsAsync.java @@ -5,6 +5,9 @@ import com.azure.ai.textanalytics.TextAnalyticsAsyncClient; import com.azure.ai.textanalytics.TextAnalyticsClientBuilder; +import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; +import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.core.credential.AzureKeyCredential; import java.util.Arrays; @@ -34,23 +37,37 @@ public static void main(String[] args) { "Mount Shasta has lenticular clouds." ); + // Request options: show statistics and model version + TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setIncludeStatistics(true).setModelVersion("latest"); + // Recognizing linked entities for each document in a batch of documents AtomicInteger counter = new AtomicInteger(); - client.recognizeLinkedEntitiesBatch(documents, "en").subscribe( - entitiesResult -> { - System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); - if (entitiesResult.isError()) { - // Erroneous document - System.out.printf("Cannot recognize linked entities. 
Error: %s%n", entitiesResult.getError().getMessage()); - } else { - // Valid document - entitiesResult.getEntities().forEach(linkedEntity -> { - System.out.println("Linked Entities:"); - System.out.printf("\tName: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", - linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), linkedEntity.getDataSource()); - linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( - "\tMatched entity: %s, confidence score: %f.%n", entityMatch.getText(), entityMatch.getConfidenceScore())); - }); + client.recognizeLinkedEntitiesBatch(documents, "en", requestOptions).subscribe( + linkedEntitiesResultCollection -> { + // Model version + System.out.printf("Results of Azure Text Analytics \"Linked Entities Recognition\" Model, version: %s%n", linkedEntitiesResultCollection.getModelVersion()); + + // Batch statistics + TextDocumentBatchStatistics batchStatistics = linkedEntitiesResultCollection.getStatistics(); + System.out.printf("Documents statistics: document count = %s, erroneous document count = %s, transaction count = %s, valid document count = %s.%n", + batchStatistics.getDocumentCount(), batchStatistics.getInvalidDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); + + // Recognized linked entities from a batch of documents + for (RecognizeLinkedEntitiesResult entitiesResult : linkedEntitiesResultCollection) { + System.out.printf("%nText = %s%n", documents.get(counter.getAndIncrement())); + if (entitiesResult.isError()) { + // Erroneous document + System.out.printf("Cannot recognize linked entities. Error: %s%n", entitiesResult.getError().getMessage()); + } else { + // Valid document + entitiesResult.getEntities().forEach(linkedEntity -> { + System.out.println("Linked Entities:"); + System.out.printf("\tName: %s, entity ID in data source: %s, URL: %s, data source: %s.%n", + linkedEntity.getName(), linkedEntity.getDataSourceEntityId(), linkedEntity.getUrl(), linkedEntity.getDataSource()); + linkedEntity.getMatches().forEach(entityMatch -> System.out.printf( + "\tMatched entity: %s, confidence score: %f.%n", entityMatch.getText(), entityMatch.getConfidenceScore())); + }); + } } }, error -> System.err.println("There was an error recognizing linked entities of the documents." 
+ error), diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputAsyncTest.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputAsyncTest.java index 0024054f32b6..b4ff23c5722b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputAsyncTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputAsyncTest.java @@ -69,11 +69,11 @@ public void detectLanguageNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchNullInput() { - StepVerifier.create(client.detectLanguageBatch(null)) + StepVerifier.create(client.detectLanguageBatch(null, null, null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -82,11 +82,11 @@ public void detectLanguagesBatchNullInput() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchEmptyInputList() { - StepVerifier.create(client.detectLanguageBatch(Collections.emptyList())) + StepVerifier.create(client.detectLanguageBatch(Collections.emptyList(), null, null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -95,11 +95,11 @@ public void detectLanguagesBatchEmptyInputList() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchNullInputWithCountryHint() { - StepVerifier.create(client.detectLanguageBatch(null, "US")) + StepVerifier.create(client.detectLanguageBatch(null, "US", null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -108,11 +108,11 @@ public void detectLanguagesBatchNullInputWithCountryHint() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchEmptyInputListWithCountryHint() { - StepVerifier.create(client.detectLanguageBatch(Collections.emptyList(), "US")) + StepVerifier.create(client.detectLanguageBatch(Collections.emptyList(), "US", null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -149,11 +149,11 @@ public void 
detectLanguagesBatchEmptyInputListWithCountryHintAndRequestOptions() /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchNullInputWithMaxOverload() { - StepVerifier.create(client.detectLanguageBatch(null, + StepVerifier.create(client.detectLanguageBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); @@ -163,11 +163,11 @@ public void detectLanguagesBatchNullInputWithMaxOverload() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchEmptyInputListWithMaxOverload() { - StepVerifier.create(client.detectLanguageBatch(Collections.emptyList(), + StepVerifier.create(client.detectLanguageBatchWithResponse(Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); @@ -205,11 +205,11 @@ public void recognizeEntitiesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchNullInput() { - StepVerifier.create(client.recognizeEntitiesBatch(null)) + StepVerifier.create(client.recognizeEntitiesBatch(null, null, null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -218,11 +218,11 @@ public void recognizeEntitiesBatchNullInput() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchEmptyInputList() { - StepVerifier.create(client.recognizeEntitiesBatch(Collections.emptyList())) + StepVerifier.create(client.recognizeEntitiesBatch(Collections.emptyList(), null, null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -231,11 +231,11 @@ public void recognizeEntitiesBatchEmptyInputList() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchNullInputWithCountryHint() { - StepVerifier.create(client.recognizeEntitiesBatch(null, "en")) + 
StepVerifier.create(client.recognizeEntitiesBatch(null, "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -244,11 +244,11 @@ public void recognizeEntitiesBatchNullInputWithCountryHint() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchEmptyInputListWithCountryHint() { - StepVerifier.create(client.recognizeEntitiesBatch(Collections.emptyList(), "en")) + StepVerifier.create(client.recognizeEntitiesBatch(Collections.emptyList(), "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -285,11 +285,11 @@ public void recognizeEntitiesBatchEmptyInputListWithCountryHintAndRequestOptions /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchNullInputWithMaxOverload() { - StepVerifier.create(client.recognizeEntitiesBatch(null, + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); @@ -299,11 +299,11 @@ public void recognizeEntitiesBatchNullInputWithMaxOverload() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions)} + * given for {@link TextAnalyticsAsyncClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchEmptyInputListWithMaxOverload() { - StepVerifier.create(client.recognizeEntitiesBatch(Collections.emptyList(), + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); @@ -341,11 +341,11 @@ public void recognizeLinkedEntitiesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchNullInput() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(null)) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(null, null, null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -354,11 +354,11 @@ public void recognizeLinkedEntitiesBatchNullInput() { /** * Verifies that an {@link 
IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputList() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(Collections.emptyList())) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(Collections.emptyList(), null, null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -367,11 +367,11 @@ public void recognizeLinkedEntitiesBatchEmptyInputList() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchNullInputWithCountryHint() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(null, "en")) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(null, "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -380,11 +380,11 @@ public void recognizeLinkedEntitiesBatchNullInputWithCountryHint() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputListWithCountryHint() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(Collections.emptyList(), "en")) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(Collections.emptyList(), "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -421,11 +421,11 @@ public void recognizeLinkedEntitiesBatchEmptyInputListWithCountryHintAndRequestO /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchNullInputWithMaxOverload() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(null, + StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); @@ -435,11 +435,11 @@ public void recognizeLinkedEntitiesBatchNullInputWithMaxOverload() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatch(Iterable, TextAnalyticsRequestOptions)} + * given for {@link 
TextAnalyticsAsyncClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputListWithMaxOverload() { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(Collections.emptyList(), + StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); @@ -477,11 +477,11 @@ public void extractKeyPhrasesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchNullInput() { - StepVerifier.create(client.extractKeyPhrasesBatch(null)) + StepVerifier.create(client.extractKeyPhrasesBatch(null, null, null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -490,11 +490,11 @@ public void extractKeyPhrasesBatchNullInput() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchEmptyInputList() { - StepVerifier.create(client.extractKeyPhrasesBatch(Collections.emptyList())) + StepVerifier.create(client.extractKeyPhrasesBatch(Collections.emptyList(), null, null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -503,11 +503,11 @@ public void extractKeyPhrasesBatchEmptyInputList() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchNullInputWithCountryHint() { - StepVerifier.create(client.extractKeyPhrasesBatch(null, "en")) + StepVerifier.create(client.extractKeyPhrasesBatch(null, "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -516,11 +516,11 @@ public void extractKeyPhrasesBatchNullInputWithCountryHint() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchEmptyInputListWithCountryHint() { - StepVerifier.create(client.extractKeyPhrasesBatch(Collections.emptyList(), "en")) + StepVerifier.create(client.extractKeyPhrasesBatch(Collections.emptyList(), "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); 
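The test hunks above all encode the same breaking change from the changelog: the convenience overloads that took only an `Iterable`, or an `Iterable` plus a language or country hint, are removed, and callers now use the max overload such as `extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)`, passing `null` for anything previously omitted. A minimal sketch of the resulting call site, assuming an already-built `TextAnalyticsAsyncClient` named `client`; the documents below are invented for illustration:

```java
List<String> documents = Arrays.asList(
    "My cat might need to see a veterinarian.",
    "The pitot tube is used to measure airspeed.");

// Before: client.extractKeyPhrasesBatch(documents) or
// client.extractKeyPhrasesBatch(documents, "en").
// After: language and request options are explicit; null selects the defaults.
client.extractKeyPhrasesBatch(documents, "en", null)
    .subscribe(resultCollection -> resultCollection.forEach(result ->
        result.getKeyPhrases().forEach(System.out::println)));
```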
assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -557,11 +557,11 @@ public void extractKeyPhrasesBatchEmptyInputListWithCountryHintAndRequestOptions /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchNullInputWithMaxOverload() { - StepVerifier.create(client.extractKeyPhrasesBatch(null, + StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); @@ -571,11 +571,11 @@ public void extractKeyPhrasesBatchNullInputWithMaxOverload() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatch(Iterable, TextAnalyticsRequestOptions)} + * given for {@link TextAnalyticsAsyncClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchEmptyInputListWithMaxOverload() { - StepVerifier.create(client.extractKeyPhrasesBatch(Collections.emptyList(), + StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); @@ -613,11 +613,11 @@ public void analyseSentimentNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchNullInput() { - StepVerifier.create(client.analyzeSentimentBatch(null)) + StepVerifier.create(client.analyzeSentimentBatch(null, null, null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -626,11 +626,11 @@ public void analyseSentimentBatchNullInput() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable)} + * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchEmptyInputList() { - StepVerifier.create(client.analyzeSentimentBatch(Collections.emptyList())) + StepVerifier.create(client.analyzeSentimentBatch(Collections.emptyList(), null, null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -639,11 +639,11 @@ public void analyseSentimentBatchEmptyInputList() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test 
public void analyseSentimentBatchNullInputWithCountryHint() { - StepVerifier.create(client.analyzeSentimentBatch(null, "en")) + StepVerifier.create(client.analyzeSentimentBatch(null, "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); @@ -652,11 +652,11 @@ public void analyseSentimentBatchNullInputWithCountryHint() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String)} + * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchEmptyInputListWithCountryHint() { - StepVerifier.create(client.analyzeSentimentBatch(Collections.emptyList(), "en")) + StepVerifier.create(client.analyzeSentimentBatch(Collections.emptyList(), "en", null)) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); @@ -693,11 +693,11 @@ public void analyseSentimentBatchEmptyInputListWithCountryHintAndRequestOptions( /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, TextAnalyticsRequestOptions)} + * {@link TextAnalyticsAsyncClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchNullInputWithMaxOverload() { - StepVerifier.create(client.analyzeSentimentBatch(null, + StepVerifier.create(client.analyzeSentimentBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(NullPointerException.class, exception.getClass()); @@ -707,11 +707,11 @@ public void analyseSentimentBatchNullInputWithMaxOverload() { /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsAsyncClient#analyzeSentimentBatch(Iterable, TextAnalyticsRequestOptions)} + * given for {@link TextAnalyticsAsyncClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentEmptyInputListWithMaxOverload() { - StepVerifier.create(client.analyzeSentimentBatch(Collections.emptyList(), + StepVerifier.create(client.analyzeSentimentBatchWithResponse(Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true))) .verifyErrorSatisfies(exception -> { assertEquals(IllegalArgumentException.class, exception.getClass()); diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputTest.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputTest.java index f9a4850e699c..8016a1c03ce0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/DocumentInputTest.java @@ -65,45 +65,45 @@ public void detectLanguageNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#detectLanguageBatch(Iterable)} + * {@link 
TextAnalyticsClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchNullInput() { Exception exception = assertThrows(NullPointerException.class, () -> - client.detectLanguageBatch(null)); + client.detectLanguageBatch(null, null, null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#detectLanguageBatch(Iterable)} + * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchEmptyInputList() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.detectLanguageBatch(Collections.emptyList())); + client.detectLanguageBatch(Collections.emptyList(), null, null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String)} + * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchNullInputWithCountryHint() { Exception exception = assertThrows(NullPointerException.class, () -> - client.detectLanguageBatch(null, "US")); + client.detectLanguageBatch(null, "US", null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String)} + * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void detectLanguagesBatchEmptyInputListWithCountryHint() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.detectLanguageBatch(Collections.emptyList(), "US")); + client.detectLanguageBatch(Collections.emptyList(), "US", null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -133,24 +133,24 @@ public void detectLanguagesBatchEmptyInputListWithCountryHintAndRequestOptions() /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void detectLanguagesBatchNullInputWithMaxOverload() { Exception exception = assertThrows(NullPointerException.class, () -> - client.detectLanguageBatch(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), + client.detectLanguageBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of {@link DetectLanguageInput} is - * given for {@link TextAnalyticsClient#detectLanguageBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * given for {@link TextAnalyticsClient#detectLanguageBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void detectLanguagesBatchEmptyInputListWithMaxOverload() { Exception exception = 
assertThrows(IllegalArgumentException.class, () -> - client.detectLanguageBatch( + client.detectLanguageBatchWithResponse( Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -181,45 +181,45 @@ public void recognizeEntitiesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable)} + * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchNullInput() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeEntitiesBatch(null)); + client.recognizeEntitiesBatch(null, null, null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable)} + * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchEmptyInputList() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeEntitiesBatch(Collections.emptyList())); + client.recognizeEntitiesBatch(Collections.emptyList(), null, null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchNullInputWithCountryHint() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeEntitiesBatch(null, "en")); + client.recognizeEntitiesBatch(null, "en", null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeEntitiesBatchEmptyInputListWithCountryHint() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeEntitiesBatch(Collections.emptyList(), "en")); + client.recognizeEntitiesBatch(Collections.emptyList(), "en", null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -249,24 +249,24 @@ public void recognizeEntitiesBatchEmptyInputListWithCountryHintAndRequestOptions /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void recognizeEntitiesBatchNullInputWithMaxOverload() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeEntitiesBatch(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), + client.recognizeEntitiesBatchWithResponse(null, new 
TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsClient#recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * given for {@link TextAnalyticsClient#recognizeEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void recognizeEntitiesBatchEmptyInputListWithMaxOverload() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeEntitiesBatch( + client.recognizeEntitiesBatchWithResponse( Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -297,45 +297,45 @@ public void recognizeLinkedEntitiesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchNullInput() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeLinkedEntitiesBatch(null)); + client.recognizeLinkedEntitiesBatch(null, null, null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputList() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeLinkedEntitiesBatch(Collections.emptyList())); + client.recognizeLinkedEntitiesBatch(Collections.emptyList(), null, null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchNullInputWithCountryHint() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeLinkedEntitiesBatch(null, "en")); + client.recognizeLinkedEntitiesBatch(null, "en", null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputListWithCountryHint() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeLinkedEntitiesBatch(Collections.emptyList(), "en")); + client.recognizeLinkedEntitiesBatch(Collections.emptyList(), "en", null)); 
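The synchronous `TextAnalyticsClient` hunks follow the same pattern, except that the former max overload taking `TextAnalyticsRequestOptions` and `Context` is renamed with a `WithResponse` suffix and, as these tests suggest, returns the HTTP-level `Response` wrapping the result collection. A hedged sketch under the same assumptions (an existing `TextAnalyticsClient` named `client`; the document id and text are invented):

```java
List<TextDocumentInput> documents = Arrays.asList(
    new TextDocumentInput("0", "Old Faithful is a geyser at Yellowstone Park."));

// recognizeEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context) became
// recognizeEntitiesBatchWithResponse(...); getValue() unwraps the collection.
Response<RecognizeEntitiesResultCollection> response =
    client.recognizeEntitiesBatchWithResponse(documents,
        new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE);
System.out.printf("Model version: %s%n", response.getValue().getModelVersion());
```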
assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -365,12 +365,12 @@ public void recognizeLinkedEntitiesBatchEmptyInputListWithCountryHintAndRequestO /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void recognizeLinkedEntitiesBatchNullInputWithMaxOverload() { Exception exception = assertThrows(NullPointerException.class, () -> - client.recognizeLinkedEntitiesBatch(null, + client.recognizeLinkedEntitiesBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } @@ -378,12 +378,12 @@ public void recognizeLinkedEntitiesBatchNullInputWithMaxOverload() { /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is * given for - * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#recognizeLinkedEntitiesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void recognizeLinkedEntitiesBatchEmptyInputListWithMaxOverload() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.recognizeLinkedEntitiesBatch( + client.recognizeLinkedEntitiesBatchWithResponse( Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -414,45 +414,45 @@ public void extractKeyPhrasesNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchNullInput() { Exception exception = assertThrows(NullPointerException.class, () -> - client.extractKeyPhrasesBatch(null)); + client.extractKeyPhrasesBatch(null, null, null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchEmptyInputList() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.extractKeyPhrasesBatch(Collections.emptyList())); + client.extractKeyPhrasesBatch(Collections.emptyList(), null, null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchNullInputWithCountryHint() { Exception exception = assertThrows(NullPointerException.class, () -> - client.extractKeyPhrasesBatch(null, "en")); + 
client.extractKeyPhrasesBatch(null, "en", null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void extractKeyPhrasesBatchEmptyInputListWithCountryHint() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.extractKeyPhrasesBatch(Collections.emptyList(), "en")); + client.extractKeyPhrasesBatch(Collections.emptyList(), "en", null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -481,12 +481,12 @@ public void extractKeyPhrasesBatchEmptyInputListWithCountryHintAndRequestOptions /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void extractKeyPhrasesBatchNullInputWithMaxOverload() { Exception exception = assertThrows(NullPointerException.class, () -> - client.extractKeyPhrasesBatch(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), + client.extractKeyPhrasesBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } @@ -494,12 +494,12 @@ public void extractKeyPhrasesBatchNullInputWithMaxOverload() { /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is * given for - * {@link TextAnalyticsClient#extractKeyPhrasesBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#extractKeyPhrasesBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void extractKeyPhrasesBatchEmptyInputListWithMaxOverload() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.extractKeyPhrasesBatch( + client.extractKeyPhrasesBatchWithResponse( Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -530,45 +530,45 @@ public void analyseSentimentNullInputWithCountryHint() { /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable)} + * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchNullInput() { Exception exception = assertThrows(NullPointerException.class, () -> - client.analyzeSentimentBatch(null)); + client.analyzeSentimentBatch(null, null, null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that an {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable)} + * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchEmptyInputList() { Exception exception = assertThrows(IllegalArgumentException.class, () 
-> - client.analyzeSentimentBatch(Collections.emptyList())); + client.analyzeSentimentBatch(Collections.emptyList(), null, null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String)} + * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchNullInputWithCountryHint() { Exception exception = assertThrows(NullPointerException.class, () -> - client.analyzeSentimentBatch(null, "en")); + client.analyzeSentimentBatch(null, "en", null)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of documents is given for - * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String)} + * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, String, TextAnalyticsRequestOptions)} */ @Test public void analyseSentimentBatchEmptyInputListWithCountryHint() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.analyzeSentimentBatch(Collections.emptyList(), "en")); + client.analyzeSentimentBatch(Collections.emptyList(), "en", null)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } @@ -598,23 +598,23 @@ public void analyseSentimentBatchEmptyInputListWithCountryHintAndRequestOptions( /** * Verifies that a {@link NullPointerException} is thrown when null documents is given for - * {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * {@link TextAnalyticsClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void analyseSentimentBatchNullInputWithMaxOverload() { Exception exception = assertThrows(NullPointerException.class, () -> - client.analyzeSentimentBatch(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); + client.analyzeSentimentBatchWithResponse(null, new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_BATCH_NPE_MESSAGE.equals(exception.getMessage())); } /** * Verifies that a {@link IllegalArgumentException} is thrown when an empty list of {@link TextDocumentInput} is - * given for {@link TextAnalyticsClient#analyzeSentimentBatch(Iterable, TextAnalyticsRequestOptions, Context)} + * given for {@link TextAnalyticsClient#analyzeSentimentBatchWithResponse(Iterable, TextAnalyticsRequestOptions, Context)} */ @Test public void analyseSentimentEmptyInputListWithMaxOverload() { Exception exception = assertThrows(IllegalArgumentException.class, () -> - client.analyzeSentimentBatch( + client.analyzeSentimentBatchWithResponse( Collections.emptyList(), new TextAnalyticsRequestOptions().setIncludeStatistics(true), Context.NONE)); assertTrue(INVALID_DOCUMENT_EMPTY_LIST_EXCEPTION_MESSAGE.equals(exception.getMessage())); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TestUtils.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TestUtils.java index 9f9babba652e..a6ebab3b3673 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TestUtils.java +++ 
b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TestUtils.java @@ -4,27 +4,31 @@ package com.azure.ai.textanalytics; import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.CategorizedEntityCollection; +import com.azure.ai.textanalytics.models.DetectLanguageInput; import com.azure.ai.textanalytics.models.DetectLanguageResult; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; +import com.azure.ai.textanalytics.models.EntityCategory; import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; import com.azure.ai.textanalytics.models.KeyPhrasesCollection; import com.azure.ai.textanalytics.models.LinkedEntity; import com.azure.ai.textanalytics.models.LinkedEntityCollection; import com.azure.ai.textanalytics.models.LinkedEntityMatch; import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; -import com.azure.ai.textanalytics.models.TextDocumentStatistics; -import com.azure.ai.textanalytics.models.DetectLanguageInput; -import com.azure.ai.textanalytics.models.EntityCategory; import com.azure.ai.textanalytics.models.TextDocumentInput; +import com.azure.ai.textanalytics.models.TextDocumentStatistics; import com.azure.ai.textanalytics.models.TextSentiment; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; import com.azure.core.exception.HttpResponseException; import com.azure.core.http.HttpClient; import com.azure.core.util.Configuration; @@ -136,8 +140,10 @@ static List getTextDocumentInputs(List inputs) { /** * Helper method to get the expected Batch Detected Languages + * + * @return A {@link DetectLanguageResultCollection}. 
*/ - static TextAnalyticsPagedResponse getExpectedBatchDetectedLanguages() { + static DetectLanguageResultCollection getExpectedBatchDetectedLanguages() { DetectedLanguage detectedLanguage1 = new DetectedLanguage("English", "en", 0.0, null); DetectedLanguage detectedLanguage2 = new DetectedLanguage("Spanish", "es", 0.0, null); DetectedLanguage detectedLanguage3 = new DetectedLanguage("(Unknown)", "(Unknown)", 0.0, null); @@ -153,27 +159,18 @@ static TextAnalyticsPagedResponse getExpectedBatchDetected TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(3, 3, 0, 3); List detectLanguageResultList = Arrays.asList(detectLanguageResult1, detectLanguageResult2, detectLanguageResult3); - return new TextAnalyticsPagedResponse<>(null, 200, null, - detectLanguageResultList, null, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); + return new DetectLanguageResultCollection(detectLanguageResultList, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); } /** * Helper method to get the expected Batch Categorized Entities + * + * @return A {@link RecognizeEntitiesResultCollection}. */ - static TextAnalyticsPagedResponse getExpectedBatchCategorizedEntities() { - return new TextAnalyticsPagedResponse<>(null, 200, null, + static RecognizeEntitiesResultCollection getExpectedBatchCategorizedEntities() { + return new RecognizeEntitiesResultCollection( Arrays.asList(getExpectedBatchCategorizedEntities1(), getExpectedBatchCategorizedEntities2()), - null, DEFAULT_MODEL_VERSION, - new TextDocumentBatchStatistics(2, 2, 0, 2)); - } - - /** - * Helper method to get the expected Categorized Entities - */ - static TextAnalyticsPagedResponse getExpectedCategorizedEntities() { - return new TextAnalyticsPagedResponse<>(null, 200, null, - getCategorizedEntitiesList1(), - null, DEFAULT_MODEL_VERSION, + DEFAULT_MODEL_VERSION, new TextDocumentBatchStatistics(2, 2, 0, 2)); } @@ -181,9 +178,9 @@ static TextAnalyticsPagedResponse getExpectedCategorizedEntit * Helper method to get the expected Categorized Entities List 1 */ static List getCategorizedEntitiesList1() { - CategorizedEntity categorizedEntity1 = new CategorizedEntity("trip", EntityCategory.EVENT.toString(), null, 0.0); - CategorizedEntity categorizedEntity2 = new CategorizedEntity("Seattle", EntityCategory.LOCATION.toString(), "GPE", 0.0); - CategorizedEntity categorizedEntity3 = new CategorizedEntity("last week", EntityCategory.DATE_TIME.toString(), "DateRange", 0.0); + CategorizedEntity categorizedEntity1 = new CategorizedEntity("trip", EntityCategory.EVENT, null, 0.0); + CategorizedEntity categorizedEntity2 = new CategorizedEntity("Seattle", EntityCategory.LOCATION, "GPE", 0.0); + CategorizedEntity categorizedEntity3 = new CategorizedEntity("last week", EntityCategory.DATE_TIME, "DateRange", 0.0); return Arrays.asList(categorizedEntity1, categorizedEntity2, categorizedEntity3); } @@ -191,7 +188,7 @@ static List getCategorizedEntitiesList1() { * Helper method to get the expected Categorized Entities List 2 */ static List getCategorizedEntitiesList2() { - CategorizedEntity categorizedEntity3 = new CategorizedEntity("Microsoft", EntityCategory.ORGANIZATION.toString(), null, 0.0); + CategorizedEntity categorizedEntity3 = new CategorizedEntity("Microsoft", EntityCategory.ORGANIZATION, null, 0.0); return Arrays.asList(categorizedEntity3); } @@ -217,8 +214,9 @@ static RecognizeEntitiesResult getExpectedBatchCategorizedEntities2() { /** * Helper method to get the expected Batch Linked Entities + * @return A {@link 
RecognizeLinkedEntitiesResultCollection}. */ - static TextAnalyticsPagedResponse getExpectedBatchLinkedEntities() { + static RecognizeLinkedEntitiesResultCollection getExpectedBatchLinkedEntities() { LinkedEntityMatch linkedEntityMatch1 = new LinkedEntityMatch("Seattle", 0.0); LinkedEntityMatch linkedEntityMatch2 = new LinkedEntityMatch("Microsoft", 0.0); @@ -244,13 +242,14 @@ static TextAnalyticsPagedResponse getExpectedBatc TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 2, 0, 2); List recognizeLinkedEntitiesResultList = Arrays.asList(recognizeLinkedEntitiesResult1, recognizeLinkedEntitiesResult2); - return new TextAnalyticsPagedResponse<>(null, 200, null, recognizeLinkedEntitiesResultList, null, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); + return new RecognizeLinkedEntitiesResultCollection(recognizeLinkedEntitiesResultList, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); } /** * Helper method to get the expected Batch Key Phrases + * @return An {@link ExtractKeyPhrasesResultCollection}. */ - static TextAnalyticsPagedResponse getExpectedBatchKeyPhrases() { + static ExtractKeyPhrasesResultCollection getExpectedBatchKeyPhrases() { TextDocumentStatistics textDocumentStatistics1 = new TextDocumentStatistics(49, 1); TextDocumentStatistics textDocumentStatistics2 = new TextDocumentStatistics(21, 1); @@ -260,28 +259,28 @@ static TextAnalyticsPagedResponse getExpectedBatchKeyPhr TextDocumentBatchStatistics textDocumentBatchStatistics = new TextDocumentBatchStatistics(2, 2, 0, 2); List extractKeyPhraseResultList = Arrays.asList(extractKeyPhraseResult1, extractKeyPhraseResult2); - return new TextAnalyticsPagedResponse<>(null, 200, null, extractKeyPhraseResultList, - null, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); + return new ExtractKeyPhrasesResultCollection(extractKeyPhraseResultList, DEFAULT_MODEL_VERSION, textDocumentBatchStatistics); } /** * Helper method to get the expected Batch Text Sentiments + * @return An {@link AnalyzeSentimentResultCollection}. */ - static TextAnalyticsPagedResponse getExpectedBatchTextSentiment() { + static AnalyzeSentimentResultCollection getExpectedBatchTextSentiment() { final TextDocumentStatistics textDocumentStatistics = new TextDocumentStatistics(67, 1); - final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED.toString(), + final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.NEGATIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.POSITIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); - final DocumentSentiment expectedDocumentSentiment2 = new DocumentSentiment(TextSentiment.MIXED.toString(), + final DocumentSentiment expectedDocumentSentiment2 = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.POSITIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.NEGATIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", 
TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); final AnalyzeSentimentResult analyzeSentimentResult1 = new AnalyzeSentimentResult("0", @@ -290,9 +289,9 @@ static TextAnalyticsPagedResponse getExpectedBatchTextSe final AnalyzeSentimentResult analyzeSentimentResult2 = new AnalyzeSentimentResult("1", textDocumentStatistics, null, expectedDocumentSentiment2); - return new TextAnalyticsPagedResponse<>(null, 200, null, + return new AnalyzeSentimentResultCollection( Arrays.asList(analyzeSentimentResult1, analyzeSentimentResult2), - null, DEFAULT_MODEL_VERSION, new TextDocumentBatchStatistics(2, 2, 0, 2)); + DEFAULT_MODEL_VERSION, new TextDocumentBatchStatistics(2, 2, 0, 2)); } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientTest.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientTest.java index fd144a94de57..abd22649c4b2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsAsyncClientTest.java @@ -65,8 +65,11 @@ private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClie public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> - StepVerifier.create(client.detectLanguageBatch(inputs, options).byPage()) - .assertNext(response -> validateDetectLanguage(true, getExpectedBatchDetectedLanguages(), response)) + StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) + .assertNext(response -> + validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), + 200, response) + ) .verifyComplete()); } @@ -78,8 +81,10 @@ public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextA public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> - StepVerifier.create(client.detectLanguageBatch(inputs, null).byPage()) - .assertNext(response -> validateDetectLanguage(false, getExpectedBatchDetectedLanguages(), response)) + StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) + .assertNext(response -> + validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), + 200, response)) .verifyComplete()); } @@ -91,8 +96,9 @@ public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServic public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> - StepVerifier.create(client.detectLanguageBatch(inputs, countryHint).byPage()) - .assertNext(response -> validateDetectLanguage(false, getExpectedBatchDetectedLanguages(), response)) + StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) + .assertNext(actualResults -> + validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } @@ -104,8 +110,8 @@ public void 
detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnaly public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> - StepVerifier.create(client.detectLanguageBatch(inputs, null, options).byPage()) - .assertNext(response -> validateDetectLanguage(true, getExpectedBatchDetectedLanguages(), response)) + StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) + .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } @@ -117,8 +123,8 @@ public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> - StepVerifier.create(client.detectLanguageBatch(inputs).byPage()) - .assertNext(response -> validateDetectLanguage(false, getExpectedBatchDetectedLanguages(), response)) + StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) + .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } @@ -180,7 +186,7 @@ public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsService public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> - StepVerifier.create(client.detectLanguageBatch(inputs, options)) + StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @@ -244,7 +250,7 @@ public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsS public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs, null)) + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @@ -253,7 +259,7 @@ public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyti public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs, null)) + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(BATCH_ERROR_EXCEPTION_MESSAGE))); } @@ -263,8 +269,8 @@ public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAn public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> - 
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null).byPage()) - .assertNext(response -> validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), response)) + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) + .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @@ -273,8 +279,8 @@ public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsS public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs, options).byPage()) - .assertNext(response -> validateCategorizedEntitiesWithPagedResponse(true, getExpectedBatchCategorizedEntities(), response)) + StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) + .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @@ -283,8 +289,8 @@ public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs).byPage()) - .assertNext(response -> validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), response)) + StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) + .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @@ -293,8 +299,8 @@ public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnal public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs, language).byPage()) - .assertNext(response -> validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), response)) + StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) + .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @@ -303,8 +309,8 @@ public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnal public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options).byPage()) - .assertNext(response -> validateCategorizedEntitiesWithPagedResponse(true, getExpectedBatchCategorizedEntities(), response)) + StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) + .assertNext(response -> validateCategorizedEntitiesResultCollection(true, 
getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @@ -313,7 +319,7 @@ public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnaly public void recognizeEntitiesTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeEntitiesTooManyDocumentsRunner(inputs -> { - StepVerifier.create(client.recognizeEntitiesBatch(inputs)) + StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException exception = (HttpResponseException) ex; assertEquals(HttpResponseException.class, exception.getClass()); @@ -359,7 +365,7 @@ public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnal public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null)) + StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @@ -368,8 +374,8 @@ public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextA public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null).byPage()) - .assertNext(response -> validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), response)) + StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) + .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @@ -378,8 +384,8 @@ public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnal public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, options).byPage()) - .assertNext(response -> validateLinkedEntitiesWithPagedResponse(true, getExpectedBatchLinkedEntities(), response)) + StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) + .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @@ -388,8 +394,8 @@ public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpCl public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs).byPage()) - .assertNext(response -> validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), response)) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) + .assertNext(response -> validateLinkedEntitiesResultCollection(false, 
getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @@ -398,8 +404,8 @@ public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, Te public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language).byPage()) - .assertNext(response -> validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), response)) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) + .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @@ -408,8 +414,8 @@ public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, Te public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options).byPage()) - .assertNext(response -> validateLinkedEntitiesWithPagedResponse(true, getExpectedBatchLinkedEntities(), response)) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) + .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @@ -418,7 +424,7 @@ public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClien public void recognizeLinkedEntitiesTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesTooManyDocumentsRunner(inputs -> { - StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs)) + StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException exception = (HttpResponseException) ex; assertEquals(HttpResponseException.class, exception.getClass()); @@ -461,7 +467,7 @@ public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsS public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null)) + StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @@ -470,8 +476,8 @@ public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyti public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null).byPage()) - .assertNext(response -> validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), response)) + StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) + .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, 
getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @@ -481,8 +487,8 @@ public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsS public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs, options).byPage()) - .assertNext(response -> validateExtractKeyPhraseWithPagedResponse(true, getExpectedBatchKeyPhrases(), response)) + StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) + .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @@ -491,8 +497,8 @@ public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs).byPage()) - .assertNext(response -> validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), response)) + StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) + .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @@ -501,8 +507,8 @@ public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnal public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language).byPage()) - .assertNext(response -> validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), response)) + StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) + .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @@ -511,8 +517,8 @@ public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnal public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> - StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options).byPage()) - .assertNext(response -> validateExtractKeyPhraseWithPagedResponse(true, getExpectedBatchKeyPhrases(), response)) + StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) + .assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @@ -537,14 +543,13 @@ public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsService public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( - inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null)) - .assertNext(keyPhrasesResult -> { + inputs -> 
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) + .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); - }); - }) - .expectNextCount(1) + }) + )) .verifyComplete() ); } @@ -558,11 +563,11 @@ public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsSe @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void analyseSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); - final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED.toString(), + final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.NEGATIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.POSITIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier @@ -590,11 +595,11 @@ public void analyseSentimentForEmptyText(HttpClient httpClient, TextAnalyticsSer public void analyseSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( - TextSentiment.NEUTRAL.toString(), + TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.NEUTRAL.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.NEUTRAL.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@#%%")) @@ -609,7 +614,7 @@ public void analyseSentimentForFaultyText(HttpClient httpClient, TextAnalyticsSe public void analyseSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseBatchSentimentDuplicateIdRunner(inputs -> - StepVerifier.create(client.analyzeSentimentBatch(inputs, null)) + StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @@ -621,8 +626,8 @@ public void analyseSentimentDuplicateIdInput(HttpClient httpClient, TextAnalytic public void analyseSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseSentimentStringInputRunner(inputs -> - StepVerifier.create(client.analyzeSentimentBatch(inputs).byPage()) - .assertNext(response -> validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), 
response)) + StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) + .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } @@ -634,8 +639,8 @@ public void analyseSentimentForBatchStringInput(HttpClient httpClient, TextAnaly public void analyseSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseSentimentLanguageHintRunner((inputs, language) -> - StepVerifier.create(client.analyzeSentimentBatch(inputs, language).byPage()) - .assertNext(response -> validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), response)) + StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) + .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } @@ -647,8 +652,8 @@ public void analyseSentimentForListLanguageHint(HttpClient httpClient, TextAnaly public void analyseSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseBatchStringSentimentShowStatsRunner((inputs, options) -> - StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options).byPage()) - .assertNext(response -> validateSentimentWithPagedResponse(true, getExpectedBatchTextSentiment(), response)) + StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) + .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } @@ -660,8 +665,8 @@ public void analyseSentimentForListStringWithOptions(HttpClient httpClient, Text public void analyseSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseBatchSentimentRunner(inputs -> - StepVerifier.create(client.analyzeSentimentBatch(inputs, null).byPage()) - .assertNext(response -> validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), response)) + StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) + .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } @@ -673,8 +678,8 @@ public void analyseSentimentForBatchInput(HttpClient httpClient, TextAnalyticsSe public void analyseSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyseBatchSentimentShowStatsRunner((inputs, options) -> - StepVerifier.create(client.analyzeSentimentBatch(inputs, options).byPage()) - .assertNext(response -> validateSentimentWithPagedResponse(true, getExpectedBatchTextSentiment(), response)) + StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) + .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilderTest.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilderTest.java index f0b715f18a7c..77aa69ce28de 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilderTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientBuilderTest.java @@ -139,7 +139,7 @@ public void clientBuilderWithDefaultCountryHintForBatchOperation(HttpClient http clientBuilderWithDefaultCountryHintForBatchOperationRunner(httpClient, serviceVersion, clientBuilder -> (input, output) -> { final List result = - clientBuilder.buildClient().detectLanguageBatch(input).stream().collect(Collectors.toList()); + clientBuilder.buildClient().detectLanguageBatch(input, "MX", null).stream().collect(Collectors.toList()); for (int i = 0; i < result.size(); i++) { validatePrimaryLanguage(output.get(i), result.get(i).getPrimaryLanguage()); } @@ -156,7 +156,7 @@ public void clientBuilderWithNewCountryHintForBatchOperation(HttpClient httpClie clientBuilderWithNewCountryHintForBatchOperationRunner(httpClient, serviceVersion, clientBuilder -> (input, output) -> { final List result = - clientBuilder.buildClient().detectLanguageBatch(input, "US").stream().collect(Collectors.toList()); + clientBuilder.buildClient().detectLanguageBatch(input, "US", null).stream().collect(Collectors.toList()); for (int i = 0; i < result.size(); i++) { validatePrimaryLanguage(output.get(i), result.get(i).getPrimaryLanguage()); } @@ -193,7 +193,7 @@ public void clientBuilderWithDefaultLanguageForBatchOperation(HttpClient httpCli clientBuilderWithDefaultLanguageForBatchOperationRunner(httpClient, serviceVersion, clientBuilder -> (input, output) -> { final List result = - clientBuilder.buildClient().extractKeyPhrasesBatch(input).stream().collect(Collectors.toList()); + clientBuilder.buildClient().extractKeyPhrasesBatch(input, "FR", null).stream().collect(Collectors.toList()); for (int i = 0; i < result.size(); i++) { validateKeyPhrases(output.get(i), result.get(i).getKeyPhrases().stream().collect(Collectors.toList())); } @@ -210,7 +210,7 @@ public void clientBuilderWithNewLanguageForBatchOperation(HttpClient httpClient, clientBuilderWithNewLanguageForBatchOperationRunner(httpClient, serviceVersion, clientBuilder -> (input, output) -> { final List result = - clientBuilder.buildClient().extractKeyPhrasesBatch(input, "EN").stream() + clientBuilder.buildClient().extractKeyPhrasesBatch(input, "EN", null).stream() .collect(Collectors.toList()); for (int i = 0; i < result.size(); i++) { validateKeyPhrases(output.get(i), result.get(i).getKeyPhrases().stream().collect(Collectors.toList())); diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTest.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTest.java index 08146c07cbf0..5b0822ab572b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTest.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTest.java @@ -8,14 +8,14 @@ import com.azure.ai.textanalytics.models.DocumentSentiment; import com.azure.ai.textanalytics.models.LinkedEntity; import com.azure.ai.textanalytics.models.LinkedEntityMatch; -import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.SentimentConfidenceScores; import 
com.azure.ai.textanalytics.models.TextAnalyticsException; import com.azure.ai.textanalytics.models.TextSentiment; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedIterable; import com.azure.core.exception.HttpResponseException; import com.azure.core.http.HttpClient; +import com.azure.core.http.rest.Response; import com.azure.core.util.Context; import com.azure.core.util.IterableStream; import org.junit.jupiter.params.ParameterizedTest; @@ -56,9 +56,9 @@ private TextAnalyticsClient getTextAnalyticsClient(HttpClient httpClient, @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguage(true, - getExpectedBatchDetectedLanguages(), - client.detectLanguageBatch(inputs, options, Context.NONE).streamByPage().findFirst().get())); + detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguageResultCollectionWithResponse(true, + getExpectedBatchDetectedLanguages(), 200, + client.detectLanguageBatchWithResponse(inputs, options, Context.NONE))); } /** @@ -68,8 +68,9 @@ public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextA @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - detectLanguageRunner((inputs) -> validateDetectLanguage(false, - getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, Context.NONE).streamByPage().findFirst().get())); + detectLanguageRunner((inputs) -> validateDetectLanguageResultCollectionWithResponse(false, + getExpectedBatchDetectedLanguages(), 200, + client.detectLanguageBatchWithResponse(inputs, null, Context.NONE))); } /** @@ -79,9 +80,9 @@ public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServic @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguage( + detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguageResultCollection( false, getExpectedBatchDetectedLanguages(), - client.detectLanguageBatch(inputs, countryHint).streamByPage().findFirst().get())); + client.detectLanguageBatch(inputs, countryHint, null))); } /** @@ -91,9 +92,8 @@ false, getExpectedBatchDetectedLanguages(), @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguage(true, - getExpectedBatchDetectedLanguages(), - client.detectLanguageBatch(inputs, null, options).streamByPage().findFirst().get())); + detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguageResultCollection(true, + getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, options))); } /** @@ -103,9 +103,8 @@ public void 
detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - detectLanguageStringInputRunner((inputs) -> validateDetectLanguage( - false, getExpectedBatchDetectedLanguages(), - client.detectLanguageBatch(inputs).streamByPage().findFirst().get())); + detectLanguageStringInputRunner((inputs) -> validateDetectLanguageResultCollection( + false, getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, null))); } /** @@ -183,7 +182,7 @@ public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsS client = getTextAnalyticsClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> { HttpResponseException response = assertThrows(HttpResponseException.class, - () -> client.detectLanguageBatch(inputs, options, Context.NONE).stream().findFirst().get()); + () -> client.detectLanguageBatchWithResponse(inputs, options, Context.NONE)); assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode()); }); } @@ -219,7 +218,7 @@ public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyti client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> { HttpResponseException response = assertThrows(HttpResponseException.class, - () -> client.recognizeEntitiesBatch(inputs, null, Context.NONE).stream().findFirst().get()); + () -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE)); assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode()); }); } @@ -229,8 +228,8 @@ public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyti public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> { - TextAnalyticsPagedIterable response = client.recognizeEntitiesBatch(inputs, null, Context.NONE); - response.forEach(recognizeEntitiesResult -> { + Response response = client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE); + response.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(exception.getMessage(), BATCH_ERROR_EXCEPTION_MESSAGE); }); @@ -242,9 +241,9 @@ public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAn public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> - client.recognizeEntitiesBatch(inputs, null, Context.NONE).iterableByPage().forEach( - pagedResponse -> - validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), pagedResponse))); + validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, + client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE)) + ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -252,18 +251,18 @@ public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsS public void 
recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> - client.recognizeEntitiesBatch(inputs, options, Context.NONE).iterableByPage().forEach( - pagedResponse -> - validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), pagedResponse))); + validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, + client.recognizeEntitiesBatchWithResponse(inputs, options, Context.NONE)) + ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - recognizeCategorizedEntityStringInputRunner((inputs) -> client.recognizeEntitiesBatch(inputs).iterableByPage() - .forEach(pagedResponse -> - validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), pagedResponse))); + recognizeCategorizedEntityStringInputRunner((inputs) -> + validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), + client.recognizeEntitiesBatch(inputs, null, null))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -271,9 +270,9 @@ public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnal public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> - client.recognizeEntitiesBatch(inputs, language).iterableByPage().forEach( - pagedResponse -> - validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), pagedResponse))); + validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), + client.recognizeEntitiesBatch(inputs, language, null)) + ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -281,9 +280,9 @@ public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnal public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> - client.recognizeEntitiesBatch(inputs, null, options).iterableByPage().forEach( - pagedResponse -> - validateCategorizedEntitiesWithPagedResponse(false, getExpectedBatchCategorizedEntities(), pagedResponse))); + validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), + client.recognizeEntitiesBatch(inputs, null, options)) + ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -292,7 +291,7 @@ public void recognizeEntitiesTooManyDocuments(HttpClient httpClient, TextAnalyti client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeEntitiesTooManyDocumentsRunner(inputs -> { HttpResponseException exception = assertThrows(HttpResponseException.class, - () -> client.recognizeEntitiesBatch(inputs).stream().findFirst().get()); + () -> client.recognizeEntitiesBatch(inputs, null, null).stream().findFirst().get()); assertEquals(EXCEEDED_ALLOWED_DOCUMENTS_LIMITS_MESSAGE, exception.getMessage()); 
assertEquals(INVALID_DOCUMENT_BATCH, exception.getValue().toString()); }); @@ -334,7 +333,7 @@ public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextA client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> { HttpResponseException response = assertThrows(HttpResponseException.class, - () -> client.recognizeLinkedEntitiesBatch(inputs, null, Context.NONE).stream().findFirst().get()); + () -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE)); assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode()); }); } @@ -344,8 +343,9 @@ public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextA public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> - client.recognizeLinkedEntitiesBatch(inputs, null, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), pagedResponse))); + validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, + client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE)) + ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -353,8 +353,8 @@ public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnal public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> - client.recognizeLinkedEntitiesBatch(inputs, options, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateLinkedEntitiesWithPagedResponse(true, getExpectedBatchLinkedEntities(), pagedResponse))); + validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, + client.recognizeLinkedEntitiesBatchWithResponse(inputs, options, Context.NONE))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -362,8 +362,7 @@ public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpCl public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> - client.recognizeLinkedEntitiesBatch(inputs).iterableByPage().forEach(pagedResponse -> - validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), pagedResponse))); + validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, null))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -371,8 +370,7 @@ public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, Te public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> - client.recognizeLinkedEntitiesBatch(inputs, language).iterableByPage().forEach(pagedResponse -> - validateLinkedEntitiesWithPagedResponse(false, getExpectedBatchLinkedEntities(), pagedResponse))); + validateLinkedEntitiesResultCollection(false, 
getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, language, null))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -380,8 +378,7 @@ public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, Te public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> - client.recognizeLinkedEntitiesBatch(inputs, null, options).iterableByPage().forEach(pagedResponse -> - validateLinkedEntitiesWithPagedResponse(true, getExpectedBatchLinkedEntities(), pagedResponse))); + validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, options))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -390,7 +387,7 @@ public void recognizeLinkedEntitiesTooManyDocuments(HttpClient httpClient, TextA client = getTextAnalyticsClient(httpClient, serviceVersion); recognizeLinkedEntitiesTooManyDocumentsRunner(inputs -> { HttpResponseException exception = assertThrows(HttpResponseException.class, - () -> client.recognizeLinkedEntitiesBatch(inputs).stream().findFirst().get()); + () -> client.recognizeLinkedEntitiesBatch(inputs, null, null).stream().findFirst().get()); assertEquals(EXCEEDED_ALLOWED_DOCUMENTS_LIMITS_MESSAGE, exception.getMessage()); assertEquals(INVALID_DOCUMENT_BATCH, exception.getValue().toString()); }); @@ -426,7 +423,7 @@ public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyti client = getTextAnalyticsClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> { HttpResponseException response = assertThrows(HttpResponseException.class, - () -> client.extractKeyPhrasesBatch(inputs, null, Context.NONE).stream().findFirst().get()); + () -> client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE)); assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode()); }); } @@ -436,8 +433,8 @@ public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyti public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> - client.extractKeyPhrasesBatch(inputs, null, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), pagedResponse))); + validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, + client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -445,8 +442,8 @@ public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsS public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> - client.extractKeyPhrasesBatch(inputs, options, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateExtractKeyPhraseWithPagedResponse(true, getExpectedBatchKeyPhrases(), pagedResponse))); + validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, + client.extractKeyPhrasesBatchWithResponse(inputs, 
options, Context.NONE))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -454,8 +451,7 @@ public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> - client.extractKeyPhrasesBatch(inputs).iterableByPage().forEach(pagedResponse -> - validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), pagedResponse))); + validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, null, null))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -463,8 +459,7 @@ public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnal public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> - client.extractKeyPhrasesBatch(inputs, language).iterableByPage().forEach(pagedResponse -> - validateExtractKeyPhraseWithPagedResponse(false, getExpectedBatchKeyPhrases(), pagedResponse))); + validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, language, null))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -472,8 +467,7 @@ public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnal public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> - client.extractKeyPhrasesBatch(inputs, null, options).iterableByPage().forEach(pagedResponse -> - validateExtractKeyPhraseWithPagedResponse(true, getExpectedBatchKeyPhrases(), pagedResponse))); + validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, null, options))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @@ -492,7 +486,7 @@ public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsService public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner(inputs -> - client.extractKeyPhrasesBatch(inputs, null, Context.NONE).forEach(keyPhrasesResult -> + client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE).getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); @@ -510,11 +504,11 @@ public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsSe public void analyseSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( - TextSentiment.MIXED.toString(), + TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.NEGATIVE.toString(), new 
SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.POSITIVE.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); DocumentSentiment analyzeSentimentResult = client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi."); @@ -540,11 +534,11 @@ public void analyseSentimentForEmptyText(HttpClient httpClient, TextAnalyticsSer @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters") public void analyseSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); - final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.NEUTRAL.toString(), + final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( - new SentenceSentiment("", TextSentiment.NEUTRAL.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)), - new SentenceSentiment("", TextSentiment.NEUTRAL.toString(), new SentimentConfidenceScores(0.0, 0.0, 0.0)) + new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), + new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); DocumentSentiment analyzeSentimentResult = client.analyzeSentiment("!@#%%"); @@ -561,7 +555,7 @@ public void analyseSentimentDuplicateIdInput(HttpClient httpClient, TextAnalytic client = getTextAnalyticsClient(httpClient, serviceVersion); analyseBatchSentimentDuplicateIdRunner(inputs -> { HttpResponseException response = assertThrows(HttpResponseException.class, - () -> client.analyzeSentimentBatch(inputs, null, Context.NONE).stream().findFirst().get()); + () -> client.analyzeSentimentBatchWithResponse(inputs, null, Context.NONE)); assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode()); }); } @@ -574,8 +568,8 @@ public void analyseSentimentDuplicateIdInput(HttpClient httpClient, TextAnalytic public void analyseSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); analyseSentimentStringInputRunner(inputs -> - client.analyzeSentimentBatch(inputs).iterableByPage().forEach(pagedResponse -> - validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), pagedResponse))); + validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), + client.analyzeSentimentBatch(inputs, null, null))); } /** @@ -586,8 +580,8 @@ public void analyseSentimentForBatchStringInput(HttpClient httpClient, TextAnaly public void analyseSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); analyseSentimentLanguageHintRunner((inputs, language) -> - client.analyzeSentimentBatch(inputs, language).iterableByPage().forEach(pagedResponse -> - validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), pagedResponse))); + validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), + client.analyzeSentimentBatch(inputs, language, null))); } /** @@ -598,8 +592,8 @@ public void 
analyseSentimentForListLanguageHint(HttpClient httpClient, TextAnaly public void analyseSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); analyseBatchStringSentimentShowStatsRunner((inputs, options) -> - client.analyzeSentimentBatch(inputs, null, options).iterableByPage().forEach(pagedResponse -> - validateSentimentWithPagedResponse(true, getExpectedBatchTextSentiment(), pagedResponse))); + validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), + client.analyzeSentimentBatch(inputs, null, options))); } /** @@ -610,8 +604,8 @@ public void analyseSentimentForListStringWithOptions(HttpClient httpClient, Text public void analyseSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); analyseBatchSentimentRunner(inputs -> - client.analyzeSentimentBatch(inputs, null, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateSentimentWithPagedResponse(false, getExpectedBatchTextSentiment(), pagedResponse))); + validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, + client.analyzeSentimentBatchWithResponse(inputs, null, Context.NONE))); } /** @@ -622,7 +616,7 @@ public void analyseSentimentForBatchInput(HttpClient httpClient, TextAnalyticsSe public void analyseSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsClient(httpClient, serviceVersion); analyseBatchSentimentShowStatsRunner((inputs, options) -> - client.analyzeSentimentBatch(inputs, options, Context.NONE).iterableByPage().forEach(pagedResponse -> - validateSentimentWithPagedResponse(true, getExpectedBatchTextSentiment(), pagedResponse))); + validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, + client.analyzeSentimentBatchWithResponse(inputs, options, Context.NONE))); } } diff --git a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTestBase.java b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTestBase.java index 99488b4fb292..4d0f7e444648 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTestBase.java +++ b/sdk/textanalytics/azure-ai-textanalytics/src/test/java/com/azure/ai/textanalytics/TextAnalyticsClientTestBase.java @@ -3,17 +3,12 @@ package com.azure.ai.textanalytics; -import com.azure.ai.textanalytics.models.AnalyzeSentimentResult; import com.azure.ai.textanalytics.models.CategorizedEntity; import com.azure.ai.textanalytics.models.DetectLanguageInput; -import com.azure.ai.textanalytics.models.DetectLanguageResult; import com.azure.ai.textanalytics.models.DetectedLanguage; import com.azure.ai.textanalytics.models.DocumentSentiment; -import com.azure.ai.textanalytics.models.ExtractKeyPhraseResult; import com.azure.ai.textanalytics.models.LinkedEntity; import com.azure.ai.textanalytics.models.LinkedEntityMatch; -import com.azure.ai.textanalytics.models.RecognizeEntitiesResult; -import com.azure.ai.textanalytics.models.RecognizeLinkedEntitiesResult; import com.azure.ai.textanalytics.models.SentenceSentiment; import com.azure.ai.textanalytics.models.TextAnalyticsError; import com.azure.ai.textanalytics.models.TextAnalyticsRequestOptions; @@ -21,14 +16,21 @@ import 
com.azure.ai.textanalytics.models.TextDocumentBatchStatistics; import com.azure.ai.textanalytics.models.TextDocumentInput; import com.azure.ai.textanalytics.models.TextDocumentStatistics; -import com.azure.ai.textanalytics.util.TextAnalyticsPagedResponse; +import com.azure.ai.textanalytics.util.AnalyzeSentimentResultCollection; +import com.azure.ai.textanalytics.util.DetectLanguageResultCollection; +import com.azure.ai.textanalytics.util.ExtractKeyPhrasesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeEntitiesResultCollection; +import com.azure.ai.textanalytics.util.RecognizeLinkedEntitiesResultCollection; import com.azure.core.credential.AzureKeyCredential; import com.azure.core.http.HttpClient; import com.azure.core.http.policy.HttpLogDetailLevel; import com.azure.core.http.policy.HttpLogOptions; +import com.azure.core.http.rest.Response; import com.azure.core.test.TestBase; import com.azure.core.test.TestMode; import com.azure.core.util.Configuration; +import com.azure.core.util.IterableStream; +import com.azure.identity.DefaultAzureCredentialBuilder; import org.junit.jupiter.api.Test; import java.util.Arrays; @@ -40,7 +42,6 @@ import java.util.function.Consumer; import java.util.stream.Collectors; -import static com.azure.ai.textanalytics.TestUtils.AZURE_TEXT_ANALYTICS_API_KEY; import static com.azure.ai.textanalytics.TestUtils.CATEGORIZED_ENTITY_INPUTS; import static com.azure.ai.textanalytics.TestUtils.DETECT_LANGUAGE_INPUTS; import static com.azure.ai.textanalytics.TestUtils.FAKE_API_KEY; @@ -374,54 +375,89 @@ TextAnalyticsClientBuilder getTextAnalyticsAsyncClientBuilder(HttpClient httpCli if (getTestMode() == TestMode.PLAYBACK) { builder.credential(new AzureKeyCredential(FAKE_API_KEY)); } else { - builder.credential(new AzureKeyCredential( - Configuration.getGlobalConfiguration().get(AZURE_TEXT_ANALYTICS_API_KEY))); + builder.credential(new DefaultAzureCredentialBuilder().build()); } return builder; } - static void validateDetectLanguage(boolean showStatistics, TextAnalyticsPagedResponse expected, - TextAnalyticsPagedResponse actual) { + static void validateDetectLanguageResultCollectionWithResponse(boolean showStatistics, + DetectLanguageResultCollection expected, + int expectedStatusCode, + Response response) { + assertNotNull(response); + assertEquals(expectedStatusCode, response.getStatusCode()); + validateDetectLanguageResultCollection(showStatistics, expected, response.getValue()); + } + + static void validateDetectLanguageResultCollection(boolean showStatistics, + DetectLanguageResultCollection expected, + DetectLanguageResultCollection actual) { validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> validatePrimaryLanguage(expectedItem.getPrimaryLanguage(), actualItem.getPrimaryLanguage())); } - static void validateCategorizedEntitiesWithPagedResponse(boolean showStatistics, - TextAnalyticsPagedResponse expected, - TextAnalyticsPagedResponse actual) { + static void validateCategorizedEntitiesResultCollectionWithResponse(boolean showStatistics, + RecognizeEntitiesResultCollection expected, + int expectedStatusCode, Response response) { + assertNotNull(response); + assertEquals(expectedStatusCode, response.getStatusCode()); + validateCategorizedEntitiesResultCollection(showStatistics, expected, response.getValue()); + } + static void validateCategorizedEntitiesResultCollection(boolean showStatistics, + RecognizeEntitiesResultCollection expected, + RecognizeEntitiesResultCollection actual) { 
validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> validateCategorizedEntities( expectedItem.getEntities().stream().collect(Collectors.toList()), actualItem.getEntities().stream().collect(Collectors.toList()))); } - static void validateCategorizedEntities( - TextAnalyticsPagedResponse expected, TextAnalyticsPagedResponse actual) { - validateCategorizedEntities(expected.getValue(), actual.getValue()); + static void validateLinkedEntitiesResultCollectionWithResponse(boolean showStatistics, + RecognizeLinkedEntitiesResultCollection expected, + int expectedStatusCode, Response response) { + assertNotNull(response); + assertEquals(expectedStatusCode, response.getStatusCode()); + validateLinkedEntitiesResultCollection(showStatistics, expected, response.getValue()); } - static void validateLinkedEntitiesWithPagedResponse(boolean showStatistics, - TextAnalyticsPagedResponse expected, - TextAnalyticsPagedResponse actual) { + static void validateLinkedEntitiesResultCollection(boolean showStatistics, + RecognizeLinkedEntitiesResultCollection expected, + RecognizeLinkedEntitiesResultCollection actual) { validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> validateLinkedEntities( expectedItem.getEntities().stream().collect(Collectors.toList()), actualItem.getEntities().stream().collect(Collectors.toList()))); } - static void validateExtractKeyPhraseWithPagedResponse(boolean showStatistics, - TextAnalyticsPagedResponse expected, - TextAnalyticsPagedResponse actual) { + static void validateExtractKeyPhrasesResultCollectionWithResponse(boolean showStatistics, + ExtractKeyPhrasesResultCollection expected, + int expectedStatusCode, Response response) { + assertNotNull(response); + assertEquals(expectedStatusCode, response.getStatusCode()); + validateExtractKeyPhrasesResultCollection(showStatistics, expected, response.getValue()); + } + + static void validateExtractKeyPhrasesResultCollection(boolean showStatistics, + ExtractKeyPhrasesResultCollection expected, + ExtractKeyPhrasesResultCollection actual) { validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> validateKeyPhrases( expectedItem.getKeyPhrases().stream().collect(Collectors.toList()), actualItem.getKeyPhrases().stream().collect(Collectors.toList()))); } - static void validateSentimentWithPagedResponse(boolean showStatistics, - TextAnalyticsPagedResponse expected, - TextAnalyticsPagedResponse actual) { + static void validateSentimentResultCollectionWithResponse(boolean showStatistics, + AnalyzeSentimentResultCollection expected, + int expectedStatusCode, Response response) { + assertNotNull(response); + assertEquals(expectedStatusCode, response.getStatusCode()); + validateSentimentResultCollection(showStatistics, expected, response.getValue()); + } + + static void validateSentimentResultCollection(boolean showStatistics, + AnalyzeSentimentResultCollection expected, + AnalyzeSentimentResultCollection actual) { validateTextAnalyticsResult(showStatistics, expected, actual, (expectedItem, actualItem) -> validateAnalyzedSentiment(expectedItem.getDocumentSentiment(), actualItem.getDocumentSentiment())); } @@ -564,19 +600,33 @@ static void validateAnalyzedSentiment(DocumentSentiment expectedSentiment, Docum /** * Helper method to verify {@link TextAnalyticsResult documents} returned in a batch request. 
@@ -564,19 +600,33 @@ static void validateAnalyzedSentiment(DocumentSentiment expectedSentiment, Docum
     /**
      * Helper method to verify {@link TextAnalyticsResult documents} returned in a batch request.
      */
-    private static <T extends TextAnalyticsResult> void validateTextAnalyticsResult(boolean showStatistics,
-        TextAnalyticsPagedResponse<T> expectedResults, TextAnalyticsPagedResponse<T> actualResults,
-        BiConsumer<T, T> additionalAssertions) {
+    static <T extends TextAnalyticsResult, H extends IterableStream<T>> void validateTextAnalyticsResult(
+        boolean showStatistics, H expectedResults, H actualResults, BiConsumer<T, T> additionalAssertions) {

-        final Map<String, T> expected = expectedResults.getElements().stream().collect(
+        final Map<String, T> expected = expectedResults.stream().collect(
             Collectors.toMap(TextAnalyticsResult::getId, r -> r));
-        final Map<String, T> actual = actualResults.getElements().stream().collect(
+        final Map<String, T> actual = actualResults.stream().collect(
             Collectors.toMap(TextAnalyticsResult::getId, r -> r));

         assertEquals(expected.size(), actual.size());

         if (showStatistics) {
-            validateBatchStatistics(expectedResults.getStatistics(), actualResults.getStatistics());
+            if (expectedResults instanceof AnalyzeSentimentResultCollection) {
+                validateBatchStatistics(((AnalyzeSentimentResultCollection) expectedResults).getStatistics(),
+                    ((AnalyzeSentimentResultCollection) actualResults).getStatistics());
+            } else if (expectedResults instanceof DetectLanguageResultCollection) {
+                validateBatchStatistics(((DetectLanguageResultCollection) expectedResults).getStatistics(),
+                    ((DetectLanguageResultCollection) actualResults).getStatistics());
+            } else if (expectedResults instanceof ExtractKeyPhrasesResultCollection) {
+                validateBatchStatistics(((ExtractKeyPhrasesResultCollection) expectedResults).getStatistics(),
+                    ((ExtractKeyPhrasesResultCollection) actualResults).getStatistics());
+            } else if (expectedResults instanceof RecognizeEntitiesResultCollection) {
+                validateBatchStatistics(((RecognizeEntitiesResultCollection) expectedResults).getStatistics(),
+                    ((RecognizeEntitiesResultCollection) actualResults).getStatistics());
+            } else if (expectedResults instanceof RecognizeLinkedEntitiesResultCollection) {
+                validateBatchStatistics(((RecognizeLinkedEntitiesResultCollection) expectedResults).getStatistics(),
+                    ((RecognizeLinkedEntitiesResultCollection) actualResults).getStatistics());
+            }
         }

         expected.forEach((key, expectedValue) -> {
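The instanceof chain exists because the five `*ResultCollection` types each declare their own `getStatistics()` without a shared interface, and the bound `H extends IterableStream<T>` cannot express that capability. One alternative sketch, under the assumption that every call site passes a method reference such as `AnalyzeSentimentResultCollection::getStatistics` (hypothetical, not what this PR does):

```java
import com.azure.ai.textanalytics.models.TextAnalyticsResult;
import com.azure.ai.textanalytics.models.TextDocumentBatchStatistics;
import com.azure.core.util.IterableStream;

import java.util.function.Function;

final class StatisticsAccess {
    // Hypothetical variant of the helper's signature: the Function parameter
    // replaces the instanceof chain by letting each caller name its accessor.
    static <T extends TextAnalyticsResult, H extends IterableStream<T>> TextDocumentBatchStatistics
        statisticsOf(H results, Function<H, TextDocumentBatchStatistics> statisticsGetter) {
        return statisticsGetter.apply(results);
    }
}
```

The trade-off is a longer parameter list at every call site in exchange for no casting inside the helper.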
diff --git a/sdk/textanalytics/test-resources.json b/sdk/textanalytics/test-resources.json
new file mode 100644
index 000000000000..b9f67cb48fdc
--- /dev/null
+++ b/sdk/textanalytics/test-resources.json
@@ -0,0 +1,96 @@
+{
+  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "baseName": {
+      "type": "string",
+      "defaultValue": "[resourceGroup().name]",
+      "metadata": {
+        "description": "The base resource name."
+      }
+    },
+    "endpointSuffix": {
+      "defaultValue": ".cognitiveservices.azure.com/",
+      "type": "String"
+    },
+    "testApplicationOid": {
+      "type": "String",
+      "metadata": {
+        "description": "The principal to assign the role to. This is application object id."
+      }
+    },
+    "tenantId": {
+      "type": "String",
+      "metadata": {
+        "description": "The tenant id to which the application and resources belong."
+      }
+    },
+    "testApplicationId": {
+      "type": "String",
+      "metadata": {
+        "description": "The application client id used to run tests."
+      }
+    },
+    "testApplicationSecret": {
+      "type": "String",
+      "metadata": {
+        "description": "The application client secret used to run tests."
+      }
+    }
+  },
+  "variables": {
+    "authorizationApiVersion": "2018-09-01-preview",
+    "textAnalyticsApiVersion": "2017-04-18",
+    "azureTextAnalyticsUrl": "[concat('https://', parameters('baseName'), parameters('endpointSuffix'))]",
+    "cognitiveServiceUserRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/a97b65f3-24c7-4388-baec-2e87135dc908')]"
+  },
+  "resources": [
+    {
+      "type": "Microsoft.Authorization/roleAssignments",
+      "apiVersion": "[variables('authorizationApiVersion')]",
+      "name": "[guid(concat(variables('cognitiveServiceUserRoleId'), parameters('baseName')))]",
+      "dependsOn": [
+        "[parameters('baseName')]"
+      ],
+      "properties": {
+        "principalId": "[parameters('testApplicationOid')]",
+        "roleDefinitionId": "[variables('cognitiveServiceUserRoleId')]"
+      }
+    },
+    {
+      "type": "Microsoft.CognitiveServices/accounts",
+      "name": "[parameters('baseName')]",
+      "apiVersion": "[variables('textAnalyticsApiVersion')]",
+      "sku": {
+        "name": "S"
+      },
+      "kind": "TextAnalytics",
+      "location": "[resourceGroup().location]",
+      "properties": {
+        "customSubDomainName": "[parameters('baseName')]"
+      }
+    }
+  ],
+  "outputs": {
+    "AZURE_TENANT_ID": {
+      "type": "String",
+      "value": "[parameters('tenantId')]"
+    },
+    "AZURE_CLIENT_ID": {
+      "type": "String",
+      "value": "[parameters('testApplicationId')]"
+    },
+    "AZURE_CLIENT_SECRET": {
+      "type": "String",
+      "value": "[parameters('testApplicationSecret')]"
+    },
+    "AZURE_TEXT_ANALYTICS_API_KEY": {
+      "type": "string",
+      "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', parameters('baseName')), variables('textAnalyticsApiVersion')).key1]"
+    },
+    "AZURE_TEXT_ANALYTICS_ENDPOINT": {
+      "type": "string",
+      "value": "[variables('azureTextAnalyticsUrl')]"
+    }
+  }
+}
diff --git a/sdk/textanalytics/tests.yml b/sdk/textanalytics/tests.yml
index 2fd8f466e980..53543a2eb931 100644
--- a/sdk/textanalytics/tests.yml
+++ b/sdk/textanalytics/tests.yml
@@ -10,5 +10,3 @@ jobs:
       safeName: azureaitextanalytics
     EnvVars:
       AZURE_TEST_MODE: RECORD
-      AZURE_TEXT_ANALYTICS_API_KEY: $(java-textanalytics-test-api-key)
-      AZURE_TEXT_ANALYTICS_ENDPOINT: $(java-textanalytics-test-ppe-endpoint-string)
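The template's outputs map one-to-one onto the environment variables the live tests consume, which is why the two pipeline secrets could be dropped from tests.yml. For completeness, a sketch of building an equivalent credential explicitly from those outputs; the variable names come from the template, while the helper class itself is hypothetical:

```java
import com.azure.core.credential.TokenCredential;
import com.azure.identity.ClientSecretCredentialBuilder;

final class TemplateOutputCredential {
    // ClientSecretCredential consumes the same AZURE_TENANT_ID, AZURE_CLIENT_ID
    // and AZURE_CLIENT_SECRET values that DefaultAzureCredential picks up
    // implicitly in the test builder above.
    static TokenCredential fromEnvironment() {
        return new ClientSecretCredentialBuilder()
            .tenantId(System.getenv("AZURE_TENANT_ID"))
            .clientId(System.getenv("AZURE_CLIENT_ID"))
            .clientSecret(System.getenv("AZURE_CLIENT_SECRET"))
            .build();
    }
}
```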