Skip to content

Commit

Permalink
Rename some properties, parameters for consistency
Browse files Browse the repository at this point in the history
  • Loading branch information
heaths committed Jun 5, 2020
1 parent 6027caf commit 0a0ed21
Show file tree
Hide file tree
Showing 8 changed files with 48 additions and 46 deletions.
5 changes: 4 additions & 1 deletion sdk/search/Azure.Search.Documents/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,10 @@

- Moved models for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`.
- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`.
- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors to make constructing it
- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors with required parameters.
- Renamed `AnalyzeTextOptions.Analyzer` to `AnalyzeTextOptions.AnalyzerName`.
- Renamed `AnalyzeTextOptions.Tokenizer` to `AnalyzeTextOptions.TokenizerName`.
- Renamed `CustomAnalyzer.Tokenizer` to `CustomAnalyzer.TokenizerName`.
- Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`.
- Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection".
- Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties].
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -215,13 +215,13 @@ internal AnalyzedTokenInfo() { }
public partial class AnalyzeTextOptions
{
public AnalyzeTextOptions(string text) { }
public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzer) { }
public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { }
public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? Analyzer { get { throw null; } }
public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzerName) { }
public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { }
public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? AnalyzerName { get { throw null; } }
public System.Collections.Generic.IList<string> CharFilters { get { throw null; } }
public string Text { get { throw null; } }
public System.Collections.Generic.IList<Azure.Search.Documents.Indexes.Models.TokenFilterName> TokenFilters { get { throw null; } }
public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? Tokenizer { get { throw null; } }
public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? TokenizerName { get { throw null; } }
}
public partial class AsciiFoldingTokenFilter : Azure.Search.Documents.Indexes.Models.TokenFilter
{
Expand Down Expand Up @@ -295,10 +295,10 @@ public CorsOptions(System.Collections.Generic.IEnumerable<string> allowedOrigins
}
public partial class CustomAnalyzer : Azure.Search.Documents.Indexes.Models.LexicalAnalyzer
{
public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { }
public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { }
public System.Collections.Generic.IList<string> CharFilters { get { throw null; } }
public System.Collections.Generic.IList<Azure.Search.Documents.Indexes.Models.TokenFilterName> TokenFilters { get { throw null; } }
public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName Tokenizer { get { throw null; } set { } }
public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName TokenizerName { get { throw null; } set { } }
}
public partial class DataChangeDetectionPolicy
{
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,12 @@ public partial class AnalyzeTextOptions
/// Initializes a new instance of AnalyzeTextOptions.
/// </summary>
/// <param name="text">Required text to break into tokens.</param>
/// <param name="analyzer">The name of the analyzer to use to break the given <paramref name="text"/>.</param>
/// <param name="analyzerName">The name of the analyzer to use to break the given <paramref name="text"/>.</param>
/// <exception cref="ArgumentNullException"><paramref name="text"/> is null.</exception>
public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzer)
public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzerName)
{
Text = text ?? throw new ArgumentNullException(nameof(text));
Analyzer = analyzer;
AnalyzerName = analyzerName;

TokenFilters = new List<TokenFilterName>();
CharFilters = new List<string>();
Expand All @@ -29,24 +29,24 @@ public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzer)
/// Initializes a new instance of AnalyzeTextOptions.
/// </summary>
/// <param name="text">Required text to break into tokens.</param>
/// <param name="tokenizer">The name of the tokenizer to use to break the given <paramref name="text"/>.</param>
/// <param name="tokenizerName">The name of the tokenizer to use to break the given <paramref name="text"/>.</param>
/// <exception cref="ArgumentNullException"><paramref name="text"/> is null.</exception>
public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizer)
public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizerName)
{
Text = text ?? throw new ArgumentNullException(nameof(text));
Tokenizer = tokenizer;
TokenizerName = tokenizerName;

TokenFilters = new List<TokenFilterName>();
CharFilters = new List<string>();
}

/// <summary> The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. </summary>
[CodeGenMember("Analyzer")]
public LexicalAnalyzerName? Analyzer { get; }
public LexicalAnalyzerName? AnalyzerName { get; }

/// <summary> The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. </summary>
[CodeGenMember("Tokenizer")]
public LexicalTokenizerName? Tokenizer { get; }
public LexicalTokenizerName? TokenizerName { get; }

/// <summary> An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. </summary>
[CodeGenMember(EmptyAsUndefined = true, Initialize = true)]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,30 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.Collections.Generic;
using Azure.Core;

namespace Azure.Search.Documents.Indexes.Models
{
public partial class CustomAnalyzer
{
/// <summary> Initializes a new instance of CustomAnalyzer. </summary>
/// <param name="name"> The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. </param>
/// <param name="tokenizerName"> The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. </param>
/// <exception cref="ArgumentNullException"><paramref name="name"/> is null.</exception>
public CustomAnalyzer(string name, LexicalTokenizerName tokenizerName) : base(name)
{
TokenizerName = tokenizerName;
TokenFilters = new List<TokenFilterName>();
CharFilters = new List<string>();
ODataType = "#Microsoft.Azure.Search.CustomAnalyzer";
}

/// <summary> The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. </summary>
[CodeGenMember("Tokenizer")]
public LexicalTokenizerName TokenizerName { get; set; }

/// <summary> A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. </summary>
[CodeGenMember(EmptyAsUndefined = true, Initialize = true)]
public IList<TokenFilterName> TokenFilters { get; }
Expand Down

0 comments on commit 0a0ed21

Please sign in to comment.