diff --git a/algoliasearch/abtesting/client.py b/algoliasearch/abtesting/client.py index afee1dca3..680e521a5 100644 --- a/algoliasearch/abtesting/client.py +++ b/algoliasearch/abtesting/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from urllib.parse import quote from pydantic import Field, StrictInt, StrictStr @@ -31,8 +31,9 @@ ScheduleABTestsRequest, ) from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -57,7 +58,7 @@ class AbtestingClient: """ _transporter: Transporter - _config: AbtestingConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -69,7 +70,9 @@ def __init__( config: Optional[AbtestingConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = AbtestingConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = AbtestingConfig(app_id, api_key, region) @@ -121,11 +124,11 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def add_ab_tests_with_http_info( self, - add_ab_tests_request: AddABTestsRequest, + add_ab_tests_request: Union[AddABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -153,7 +156,7 @@ async def 
add_ab_tests_with_http_info( verb=Verb.POST, path="/2/abtests", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -161,7 +164,7 @@ async def add_ab_tests_with_http_info( async def add_ab_tests( self, - add_ab_tests_request: AddABTestsRequest, + add_ab_tests_request: Union[AddABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ABTestResponse: """ @@ -211,11 +214,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -286,11 +289,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -365,11 +368,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -380,7 +383,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( 
query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -457,11 +460,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -472,7 +475,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -653,16 +656,16 @@ async def list_ab_tests_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if index_prefix is not None: - _query_parameters.append(("indexPrefix", index_prefix)) + _query_parameters["indexPrefix"] = index_prefix if index_suffix is not None: - _query_parameters.append(("indexSuffix", index_suffix)) + _query_parameters["indexSuffix"] = index_suffix return await self._transporter.request( verb=Verb.GET, @@ -721,7 +724,7 @@ async def list_ab_tests( async def schedule_ab_test_with_http_info( self, - schedule_ab_tests_request: ScheduleABTestsRequest, + schedule_ab_tests_request: Union[ScheduleABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -749,7 +752,7 @@ async def schedule_ab_test_with_http_info( verb=Verb.POST, path="/2/abtests/schedule", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -757,7 +760,7 @@ async def schedule_ab_test_with_http_info( async def schedule_ab_test( self, - schedule_ab_tests_request: ScheduleABTestsRequest, + schedule_ab_tests_request: Union[ScheduleABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ScheduleABTestResponse: """ @@ -844,7 +847,7 @@ class AbtestingClientSync: """ _transporter: TransporterSync - _config: AbtestingConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -856,7 +859,9 @@ def __init__( config: Optional[AbtestingConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = AbtestingConfig( + transporter.config.app_id, transporter.config.api_key, region 
+ ) if config is None: config = AbtestingConfig(app_id, api_key, region) @@ -907,11 +912,11 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def add_ab_tests_with_http_info( self, - add_ab_tests_request: AddABTestsRequest, + add_ab_tests_request: Union[AddABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -939,7 +944,7 @@ def add_ab_tests_with_http_info( verb=Verb.POST, path="/2/abtests", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -947,7 +952,7 @@ def add_ab_tests_with_http_info( def add_ab_tests( self, - add_ab_tests_request: AddABTestsRequest, + add_ab_tests_request: Union[AddABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ABTestResponse: """ @@ -995,11 +1000,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -1068,11 +1073,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -1147,11 +1152,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1162,7 +1167,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1237,11 +1242,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1252,7 +1257,7 @@ def custom_put_with_http_info( 
path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1431,16 +1436,16 @@ def list_ab_tests_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if index_prefix is not None: - _query_parameters.append(("indexPrefix", index_prefix)) + _query_parameters["indexPrefix"] = index_prefix if index_suffix is not None: - _query_parameters.append(("indexSuffix", index_suffix)) + _query_parameters["indexSuffix"] = index_suffix return self._transporter.request( verb=Verb.GET, @@ -1499,7 +1504,7 @@ def list_ab_tests( def schedule_ab_test_with_http_info( self, - schedule_ab_tests_request: ScheduleABTestsRequest, + schedule_ab_tests_request: Union[ScheduleABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1527,7 +1532,7 @@ def schedule_ab_test_with_http_info( verb=Verb.POST, path="/2/abtests/schedule", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1535,7 +1540,7 @@ def schedule_ab_test_with_http_info( def schedule_ab_test( self, - schedule_ab_tests_request: ScheduleABTestsRequest, + schedule_ab_tests_request: Union[ScheduleABTestsRequest, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ScheduleABTestResponse: """ diff --git a/algoliasearch/abtesting/config.py 
b/algoliasearch/abtesting/config.py index 56dbaf8a4..47a1d418b 100644 --- a/algoliasearch/abtesting/config.py +++ b/algoliasearch/abtesting/config.py @@ -7,11 +7,19 @@ class AbtestingConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> None: + def __init__( + self, + app_id: Optional[str], + api_key: Optional[str], + region: Optional[str] = None, + ) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Abtesting") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." + self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -42,7 +50,9 @@ def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> N Host( "analytics.algolia.com" if region is None - else "analytics.{region}.algolia.com".replace("{region}", region) + else "analytics.{region}.algolia.com".replace( + "{region}", region or "" + ) ) ] ) diff --git a/algoliasearch/abtesting/models/ab_test.py b/algoliasearch/abtesting/models/ab_test.py index f50884cb5..cf41978ef 100644 --- a/algoliasearch/abtesting/models/ab_test.py +++ b/algoliasearch/abtesting/models/ab_test.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,47 +22,58 @@ from algoliasearch.abtesting.models.status import Status from algoliasearch.abtesting.models.variant import Variant +_ALIASES = { + "ab_test_id": "abTestID", + "click_significance": "clickSignificance", + "conversion_significance": "conversionSignificance", + "add_to_cart_significance": "addToCartSignificance", + "purchase_significance": "purchaseSignificance", + "revenue_significance": "revenueSignificance", + "updated_at": "updatedAt", + "created_at": "createdAt", + "end_at": "endAt", + "name": "name", + "status": "status", + "variants": "variants", 
+ "configuration": "configuration", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ABTest(BaseModel): """ ABTest """ - ab_test_id: int = Field(alias="abTestID") + ab_test_id: int """ Unique A/B test identifier. """ - click_significance: Optional[float] = Field(default=None, alias="clickSignificance") - conversion_significance: Optional[float] = Field( - default=None, alias="conversionSignificance" - ) - add_to_cart_significance: Optional[float] = Field( - default=None, alias="addToCartSignificance" - ) - purchase_significance: Optional[float] = Field( - default=None, alias="purchaseSignificance" - ) - revenue_significance: Optional[Dict[str, float]] = Field( - default=None, alias="revenueSignificance" - ) - updated_at: str = Field(alias="updatedAt") + click_significance: Optional[float] = None + conversion_significance: Optional[float] = None + add_to_cart_significance: Optional[float] = None + purchase_significance: Optional[float] = None + revenue_significance: Optional[Dict[str, float]] = None + updated_at: str """ Date and time when the A/B test was last updated, in RFC 3339 format. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date and time when the A/B test was created, in RFC 3339 format. """ - end_at: str = Field(alias="endAt") + end_at: str """ End date and time of the A/B test, in RFC 3339 format. """ - name: str = Field(alias="name") + name: str """ A/B test name. """ - status: Status = Field(alias="status") - variants: List[Variant] = Field(alias="variants") + status: Status + variants: List[Variant] """ A/B test variants. The first variant is your _control_ index, typically your production index. The second variant is an index with changed settings that you want to test against the control. 
""" - configuration: Optional[ABTestConfiguration] = Field( - default=None, alias="configuration" - ) + configuration: Optional[ABTestConfiguration] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/ab_test_configuration.py b/algoliasearch/abtesting/models/ab_test_configuration.py index 62b211f99..e2e34eb73 100644 --- a/algoliasearch/abtesting/models/ab_test_configuration.py +++ b/algoliasearch/abtesting/models/ab_test_configuration.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -24,23 +24,32 @@ ) from algoliasearch.abtesting.models.outliers import Outliers +_ALIASES = { + "outliers": "outliers", + "empty_search": "emptySearch", + "minimum_detectable_effect": "minimumDetectableEffect", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ABTestConfiguration(BaseModel): """ A/B test configuration. 
""" - outliers: Outliers = Field(alias="outliers") - empty_search: Optional[EmptySearch] = Field(default=None, alias="emptySearch") - minimum_detectable_effect: Optional[MinimumDetectableEffect] = Field( - default=None, alias="minimumDetectableEffect" - ) + outliers: Outliers + empty_search: Optional[EmptySearch] = None + minimum_detectable_effect: Optional[MinimumDetectableEffect] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/ab_test_response.py b/algoliasearch/abtesting/models/ab_test_response.py index ab977ff7c..904fffab6 100644 --- a/algoliasearch/abtesting/models/ab_test_response.py +++ b/algoliasearch/abtesting/models/ab_test_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "index": "index", + "ab_test_id": "abTestID", + "task_id": "taskID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ABTestResponse(BaseModel): """ ABTestResponse """ - index: str = Field(alias="index") + index: str """ Index name of the A/B test variant (case-sensitive). """ - ab_test_id: int = Field(alias="abTestID") + ab_test_id: int """ Unique A/B test identifier. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class ABTestResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/ab_tests_variant.py b/algoliasearch/abtesting/models/ab_tests_variant.py index 2eb6901cc..82d159c5c 100644 --- a/algoliasearch/abtesting/models/ab_tests_variant.py +++ b/algoliasearch/abtesting/models/ab_tests_variant.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "index": "index", + "traffic_percentage": "trafficPercentage", + "description": "description", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AbTestsVariant(BaseModel): """ AbTestsVariant """ - index: str = Field(alias="index") + index: str """ Index name of the A/B test variant (case-sensitive). """ - traffic_percentage: int = Field(alias="trafficPercentage") + traffic_percentage: int """ Percentage of search requests each variant receives. """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ Description for this variant. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class AbTestsVariant(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/ab_tests_variant_search_params.py b/algoliasearch/abtesting/models/ab_tests_variant_search_params.py index 518174154..b92e0e17a 100644 --- a/algoliasearch/abtesting/models/ab_tests_variant_search_params.py +++ b/algoliasearch/abtesting/models/ab_tests_variant_search_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,24 +18,37 @@ from typing_extensions import Self +_ALIASES = { + "index": "index", + "traffic_percentage": "trafficPercentage", + "description": "description", + "custom_search_parameters": "customSearchParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AbTestsVariantSearchParams(BaseModel): """ AbTestsVariantSearchParams """ - index: str = Field(alias="index") + index: str """ Index name of the A/B test variant (case-sensitive). """ - traffic_percentage: int = Field(alias="trafficPercentage") + traffic_percentage: int """ Percentage of search requests each variant receives. """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ Description for this variant. 
""" - custom_search_parameters: object = Field(alias="customSearchParameters") + custom_search_parameters: object model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/add_ab_tests_request.py b/algoliasearch/abtesting/models/add_ab_tests_request.py index e6586885f..63f9e77f4 100644 --- a/algoliasearch/abtesting/models/add_ab_tests_request.py +++ b/algoliasearch/abtesting/models/add_ab_tests_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,17 +20,27 @@ from algoliasearch.abtesting.models.add_ab_tests_variant import AddABTestsVariant +_ALIASES = { + "name": "name", + "variants": "variants", + "end_at": "endAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class AddABTestsRequest(BaseModel): """ AddABTestsRequest """ - name: str = Field(alias="name") + name: str """ A/B test name. """ - variants: List[AddABTestsVariant] = Field(alias="variants") + variants: List[AddABTestsVariant] """ A/B test variants. """ - end_at: str = Field(alias="endAt") + end_at: str """ End date and time of the A/B test, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -38,6 +48,7 @@ class AddABTestsRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/add_ab_tests_variant.py b/algoliasearch/abtesting/models/add_ab_tests_variant.py index 3f506ca96..41adc608d 100644 --- a/algoliasearch/abtesting/models/add_ab_tests_variant.py +++ b/algoliasearch/abtesting/models/add_ab_tests_variant.py @@ -33,7 +33,7 @@ class AddABTestsVariant(BaseModel): oneof_schema_2_validator: Optional[AbTestsVariantSearchParams] = Field(default=None) - actual_instance: Optional[Union[AbTestsVariant, AbTestsVariantSearchParams]] = None + actual_instance: Union[AbTestsVariant, AbTestsVariantSearchParams, None] = None one_of_schemas: Set[str] = {"AbTestsVariant", "AbTestsVariantSearchParams"} def __init__(self, *args, **kwargs) -> None: @@ -46,14 +46,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[AbTestsVariant, AbTestsVariantSearchParams]]: + ) -> Union[AbTestsVariant, AbTestsVariantSearchParams, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -94,9 +94,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -108,8 +108,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/abtesting/models/currency.py b/algoliasearch/abtesting/models/currency.py index 8659a9fef..deb2a5745 100644 --- a/algoliasearch/abtesting/models/currency.py +++ b/algoliasearch/abtesting/models/currency.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "currency": "currency", + "revenue": "revenue", + "mean": "mean", + "standard_deviation": "standardDeviation", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Currency(BaseModel): """ Currency """ - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Currency code. """ - revenue: Optional[float] = Field(default=None, alias="revenue") + revenue: Optional[float] = None """ Revenue for this currency. """ - mean: Optional[float] = Field(default=None, alias="mean") + mean: Optional[float] = None """ Mean for this currency. 
""" - standard_deviation: Optional[float] = Field(default=None, alias="standardDeviation") + standard_deviation: Optional[float] = None """ Standard deviation for this currency. """ model_config = ConfigDict( @@ -37,6 +49,7 @@ class Currency(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/empty_search.py b/algoliasearch/abtesting/models/empty_search.py index 4b753e4e4..72cea2051 100644 --- a/algoliasearch/abtesting/models/empty_search.py +++ b/algoliasearch/abtesting/models/empty_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "exclude": "exclude", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class EmptySearch(BaseModel): """ Configuration for handling empty searches. """ - exclude: Optional[bool] = Field(default=None, alias="exclude") + exclude: Optional[bool] = None """ Whether to exclude empty searches when calculating A/B test results. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class EmptySearch(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/empty_search_filter.py b/algoliasearch/abtesting/models/empty_search_filter.py index eff8cb848..da6ec5bfa 100644 --- a/algoliasearch/abtesting/models/empty_search_filter.py +++ b/algoliasearch/abtesting/models/empty_search_filter.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "users_count": "usersCount", + "tracked_searches_count": "trackedSearchesCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class EmptySearchFilter(BaseModel): """ Empty searches removed from the A/B test as a result of configuration settings. """ - users_count: Optional[int] = Field(default=None, alias="usersCount") + users_count: Optional[int] = None """ Number of users removed from the A/B test. """ - tracked_searches_count: Optional[int] = Field( - default=None, alias="trackedSearchesCount" - ) + tracked_searches_count: Optional[int] = None """ Number of tracked searches removed from the A/B test. 
""" model_config = ConfigDict( @@ -35,6 +43,7 @@ class EmptySearchFilter(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/error_base.py b/algoliasearch/abtesting/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/abtesting/models/error_base.py +++ b/algoliasearch/abtesting/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/abtesting/models/filter_effects.py b/algoliasearch/abtesting/models/filter_effects.py index e1d64daf0..430fd5d80 100644 --- a/algoliasearch/abtesting/models/filter_effects.py +++ b/algoliasearch/abtesting/models/filter_effects.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.abtesting.models.empty_search_filter import EmptySearchFilter from algoliasearch.abtesting.models.outliers_filter import OutliersFilter +_ALIASES = { + "outliers": "outliers", + "empty_search": "emptySearch", +} + + +def _alias_generator(name: str) -> str: + return 
_ALIASES.get(name, name) + class FilterEffects(BaseModel): """ A/B test filter effects resulting from configuration settings. """ - outliers: Optional[OutliersFilter] = Field(default=None, alias="outliers") - empty_search: Optional[EmptySearchFilter] = Field(default=None, alias="emptySearch") + outliers: Optional[OutliersFilter] = None + empty_search: Optional[EmptySearchFilter] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/list_ab_tests_response.py b/algoliasearch/abtesting/models/list_ab_tests_response.py index 3d77b3256..f4a453295 100644 --- a/algoliasearch/abtesting/models/list_ab_tests_response.py +++ b/algoliasearch/abtesting/models/list_ab_tests_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,17 +20,27 @@ from algoliasearch.abtesting.models.ab_test import ABTest +_ALIASES = { + "abtests": "abtests", + "count": "count", + "total": "total", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListABTestsResponse(BaseModel): """ ListABTestsResponse """ - abtests: List[ABTest] = Field(alias="abtests") + abtests: List[ABTest] """ A/B tests. """ - count: int = Field(alias="count") + count: int """ Number of A/B tests. """ - total: int = Field(alias="total") + total: int """ Number of retrievable A/B tests. 
""" model_config = ConfigDict( @@ -38,6 +48,7 @@ class ListABTestsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/minimum_detectable_effect.py b/algoliasearch/abtesting/models/minimum_detectable_effect.py index 3bffeb8db..57d815356 100644 --- a/algoliasearch/abtesting/models/minimum_detectable_effect.py +++ b/algoliasearch/abtesting/models/minimum_detectable_effect.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.abtesting.models.effect import Effect +_ALIASES = { + "size": "size", + "effect": "effect", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class MinimumDetectableEffect(BaseModel): """ Configuration for the smallest difference between test variants you want to detect. """ - size: Optional[float] = Field(default=None, alias="size") + size: Optional[float] = None """ Smallest difference in an observable metric between variants. For example, to detect a 10% difference between variants, set this value to 0.1. 
""" - effect: Optional[Effect] = Field(default=None, alias="effect") + effect: Optional[Effect] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/outliers.py b/algoliasearch/abtesting/models/outliers.py index 503b75106..9970a90c5 100644 --- a/algoliasearch/abtesting/models/outliers.py +++ b/algoliasearch/abtesting/models/outliers.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "exclude": "exclude", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Outliers(BaseModel): """ Configuration for handling outliers. """ - exclude: Optional[bool] = Field(default=None, alias="exclude") + exclude: Optional[bool] = None """ Whether to exclude outliers when calculating A/B test results. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class Outliers(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/outliers_filter.py b/algoliasearch/abtesting/models/outliers_filter.py index 21cd53b43..740757098 100644 --- a/algoliasearch/abtesting/models/outliers_filter.py +++ b/algoliasearch/abtesting/models/outliers_filter.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "users_count": "usersCount", + "tracked_searches_count": "trackedSearchesCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class OutliersFilter(BaseModel): """ Outliers removed from the A/B test as a result of configuration settings. """ - users_count: Optional[int] = Field(default=None, alias="usersCount") + users_count: Optional[int] = None """ Number of users removed from the A/B test. """ - tracked_searches_count: Optional[int] = Field( - default=None, alias="trackedSearchesCount" - ) + tracked_searches_count: Optional[int] = None """ Number of tracked searches removed from the A/B test. 
""" model_config = ConfigDict( @@ -35,6 +43,7 @@ class OutliersFilter(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/schedule_ab_test_response.py b/algoliasearch/abtesting/models/schedule_ab_test_response.py index e8ecbfe0c..61c6a6603 100644 --- a/algoliasearch/abtesting/models/schedule_ab_test_response.py +++ b/algoliasearch/abtesting/models/schedule_ab_test_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "ab_test_schedule_id": "abTestScheduleID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ScheduleABTestResponse(BaseModel): """ ScheduleABTestResponse """ - ab_test_schedule_id: int = Field(alias="abTestScheduleID") + ab_test_schedule_id: int """ Unique scheduled A/B test identifier. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class ScheduleABTestResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/schedule_ab_tests_request.py b/algoliasearch/abtesting/models/schedule_ab_tests_request.py index 0a0b3ebb7..3531b0906 100644 --- a/algoliasearch/abtesting/models/schedule_ab_tests_request.py +++ b/algoliasearch/abtesting/models/schedule_ab_tests_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.abtesting.models.add_ab_tests_variant import AddABTestsVariant +_ALIASES = { + "name": "name", + "variants": "variants", + "scheduled_at": "scheduledAt", + "end_at": "endAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ScheduleABTestsRequest(BaseModel): """ ScheduleABTestsRequest """ - name: str = Field(alias="name") + name: str """ A/B test name. """ - variants: List[AddABTestsVariant] = Field(alias="variants") + variants: List[AddABTestsVariant] """ A/B test variants. """ - scheduled_at: str = Field(alias="scheduledAt") + scheduled_at: str """ Date and time when the A/B test is scheduled to start, in RFC 3339 format. """ - end_at: str = Field(alias="endAt") + end_at: str """ End date and time of the A/B test, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class ScheduleABTestsRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/abtesting/models/variant.py b/algoliasearch/abtesting/models/variant.py index 708e32314..0db869e40 100644 --- a/algoliasearch/abtesting/models/variant.py +++ b/algoliasearch/abtesting/models/variant.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,56 +21,77 @@ from algoliasearch.abtesting.models.currency import Currency from algoliasearch.abtesting.models.filter_effects import FilterEffects +_ALIASES = { + "add_to_cart_count": "addToCartCount", + "add_to_cart_rate": "addToCartRate", + "average_click_position": "averageClickPosition", + "click_count": "clickCount", + "click_through_rate": "clickThroughRate", + "conversion_count": "conversionCount", + "conversion_rate": "conversionRate", + "currencies": "currencies", + "description": "description", + "estimated_sample_size": "estimatedSampleSize", + "filter_effects": "filterEffects", + "index": "index", + "no_result_count": "noResultCount", + "purchase_count": "purchaseCount", + "purchase_rate": "purchaseRate", + "search_count": "searchCount", + "tracked_search_count": "trackedSearchCount", + "traffic_percentage": "trafficPercentage", + "user_count": "userCount", + "tracked_user_count": "trackedUserCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Variant(BaseModel): """ Variant """ - add_to_cart_count: int = Field(alias="addToCartCount") + add_to_cart_count: int """ Number of add-to-cart events for this variant. 
""" - add_to_cart_rate: Optional[float] = Field(default=None, alias="addToCartRate") + add_to_cart_rate: Optional[float] = None """ [Add-to-cart rate](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#add-to-cart-rate) for this variant. """ - average_click_position: Optional[int] = Field( - default=None, alias="averageClickPosition" - ) + average_click_position: Optional[int] = None """ [Average click position](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#click-position) for this variant. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of click events for this variant. """ - click_through_rate: Optional[float] = Field(default=None, alias="clickThroughRate") + click_through_rate: Optional[float] = None """ [Click-through rate](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#click-through-rate) for this variant. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of click events for this variant. """ - conversion_rate: Optional[float] = Field(default=None, alias="conversionRate") + conversion_rate: Optional[float] = None """ [Conversion rate](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#conversion-rate) for this variant. """ - currencies: Optional[Dict[str, Currency]] = Field(default=None, alias="currencies") + currencies: Optional[Dict[str, Currency]] = None """ A/B test currencies. """ - description: str = Field(alias="description") + description: str """ Description for this variant. """ - estimated_sample_size: Optional[int] = Field( - default=None, alias="estimatedSampleSize" - ) + estimated_sample_size: Optional[int] = None """ Estimated number of searches required to achieve the desired statistical significance. The A/B test configuration must include a `mininmumDetectableEffect` setting for this number to be included in the response. 
""" - filter_effects: Optional[FilterEffects] = Field(default=None, alias="filterEffects") - index: str = Field(alias="index") + filter_effects: Optional[FilterEffects] = None + index: str """ Index name of the A/B test variant (case-sensitive). """ - no_result_count: int = Field(alias="noResultCount") + no_result_count: int """ Number of [searches without results](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#searches-without-results) for this variant. """ - purchase_count: int = Field(alias="purchaseCount") + purchase_count: int """ Number of purchase events for this variant. """ - purchase_rate: Optional[float] = Field(default=None, alias="purchaseRate") + purchase_rate: Optional[float] = None """ [Purchase rate](https://www.algolia.com/doc/guides/search-analytics/concepts/metrics/#purchase-rate) for this variant. """ - search_count: int = Field(alias="searchCount") + search_count: int """ Number of searches for this variant. """ - tracked_search_count: Optional[int] = Field( - default=None, alias="trackedSearchCount" - ) + tracked_search_count: Optional[int] = None """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - traffic_percentage: int = Field(alias="trafficPercentage") + traffic_percentage: int """ Percentage of search requests each variant receives. """ - user_count: int = Field(alias="userCount") + user_count: int """ Number of users that made searches to this variant. """ - tracked_user_count: int = Field(alias="trackedUserCount") + tracked_user_count: int """ Number of users that made tracked searches to this variant. 
""" model_config = ConfigDict( @@ -78,6 +99,7 @@ class Variant(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/client.py b/algoliasearch/analytics/client.py index ec71be32e..a13c17f76 100644 --- a/algoliasearch/analytics/client.py +++ b/algoliasearch/analytics/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from urllib.parse import quote from pydantic import Field, StrictBool, StrictStr @@ -77,8 +77,9 @@ ) from algoliasearch.analytics.models.order_by import OrderBy from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -103,7 +104,7 @@ class AnalyticsClient: """ _transporter: Transporter - _config: AnalyticsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -115,7 +116,9 @@ def __init__( config: Optional[AnalyticsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = AnalyticsConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = AnalyticsConfig(app_id, api_key, region) @@ -167,7 +170,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def 
custom_delete_with_http_info( self, @@ -200,11 +203,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -275,11 +278,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -354,11 +357,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -369,7 +372,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -446,11 +449,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - 
_query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -461,7 +464,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -549,16 +552,16 @@ async def get_add_to_cart_rate_with_http_info( "Parameter `index` is required when calling `get_add_to_cart_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -661,16 +664,16 @@ async def get_average_click_position_with_http_info( "Parameter `index` is required when calling `get_average_click_position`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -773,16 +776,16 @@ async def get_click_positions_with_http_info( "Parameter `index` is required when calling `get_click_positions`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -885,16 +888,16 @@ async def get_click_through_rate_with_http_info( "Parameter `index` is required when calling `get_click_through_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -997,16 +1000,16 @@ async def get_conversion_rate_with_http_info( "Parameter `index` is required when calling `get_conversion_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1109,16 +1112,16 @@ async def get_no_click_rate_with_http_info( "Parameter `index` is required when calling `get_no_click_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1221,16 +1224,16 @@ async def get_no_results_rate_with_http_info( "Parameter `index` is required when calling `get_no_results_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1333,16 +1336,16 @@ async def get_purchase_rate_with_http_info( "Parameter `index` is required when calling `get_purchase_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1445,16 +1448,16 @@ async def get_revenue_with_http_info( "Parameter `index` is required when calling `get_revenue`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1557,16 +1560,16 @@ async def get_searches_count_with_http_info( "Parameter `index` is required when calling `get_searches_count`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1685,20 +1688,20 @@ async def get_searches_no_clicks_with_http_info( "Parameter `index` is required when calling `get_searches_no_clicks`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1833,20 +1836,20 @@ async def get_searches_no_results_with_http_info( "Parameter `index` is required when calling `get_searches_no_results`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -1939,10 +1942,10 @@ async def get_status_with_http_info( if index is None: raise ValueError("Parameter `index` is required when calling `get_status`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index return await self._transporter.request( verb=Verb.GET, @@ -2035,20 +2038,20 @@ async def get_top_countries_with_http_info( "Parameter `index` is required when calling `get_top_countries`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2188,22 +2191,22 @@ async def get_top_filter_attributes_with_http_info( "Parameter `index` is required when calling `get_top_filter_attributes`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2356,22 +2359,22 @@ async def get_top_filter_for_attribute_with_http_info( "Parameter `index` is required when calling 
`get_top_filter_for_attribute`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2529,22 +2532,22 @@ async def get_top_filters_no_results_with_http_info( "Parameter `index` is required when calling `get_top_filters_no_results`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2705,26 +2708,26 @@ async def get_top_hits_with_http_info( "Parameter `index` is required when calling `get_top_hits`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if click_analytics is not None: - _query_parameters.append(("clickAnalytics", click_analytics)) + _query_parameters["clickAnalytics"] = click_analytics if revenue_analytics is not None: - _query_parameters.append(("revenueAnalytics", revenue_analytics)) + _query_parameters["revenueAnalytics"] = revenue_analytics if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2854,17 +2857,23 @@ async def get_top_searches_with_http_info( description="End date of the period to analyze, in `YYYY-MM-DD` format." ), ] = None, - order_by: Annotated[ - Optional[OrderBy], - Field( - description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " - ), - ] = None, - direction: Annotated[ - Optional[Direction], - Field( - description="Sorting direction of the results: ascending or descending. " - ), + order_by: Union[ + Annotated[ + Optional[OrderBy], + Field( + description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. 
" + ), + ], + str, + ] = None, + direction: Union[ + Annotated[ + Optional[Direction], + Field( + description="Sorting direction of the results: ascending or descending. " + ), + ], + str, ] = None, limit: Annotated[ Optional[Annotated[int, Field(le=1000, strict=True)]], @@ -2921,28 +2930,28 @@ async def get_top_searches_with_http_info( "Parameter `index` is required when calling `get_top_searches`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if click_analytics is not None: - _query_parameters.append(("clickAnalytics", click_analytics)) + _query_parameters["clickAnalytics"] = click_analytics if revenue_analytics is not None: - _query_parameters.append(("revenueAnalytics", revenue_analytics)) + _query_parameters["revenueAnalytics"] = revenue_analytics if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if order_by is not None: - _query_parameters.append(("orderBy", order_by)) + _query_parameters["orderBy"] = order_by if direction is not None: - _query_parameters.append(("direction", direction)) + _query_parameters["direction"] = direction if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -2981,17 +2990,23 @@ async def get_top_searches( description="End date of the period to analyze, in `YYYY-MM-DD` format." 
), ] = None, - order_by: Annotated[ - Optional[OrderBy], - Field( - description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " - ), - ] = None, - direction: Annotated[ - Optional[Direction], - Field( - description="Sorting direction of the results: ascending or descending. " - ), + order_by: Union[ + Annotated[ + Optional[OrderBy], + Field( + description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " + ), + ], + str, + ] = None, + direction: Union[ + Annotated[ + Optional[Direction], + Field( + description="Sorting direction of the results: ascending or descending. " + ), + ], + str, ] = None, limit: Annotated[ Optional[Annotated[int, Field(le=1000, strict=True)]], @@ -3103,16 +3118,16 @@ async def get_users_count_with_http_info( "Parameter `index` is required when calling `get_users_count`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return await self._transporter.request( verb=Verb.GET, @@ -3189,7 +3204,7 @@ class AnalyticsClientSync: """ _transporter: TransporterSync - _config: AnalyticsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -3201,7 +3216,9 @@ def __init__( config: Optional[AnalyticsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = AnalyticsConfig( + transporter.config.app_id, transporter.config.api_key, region + 
) if config is None: config = AnalyticsConfig(app_id, api_key, region) @@ -3252,7 +3269,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def custom_delete_with_http_info( self, @@ -3285,11 +3302,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -3358,11 +3375,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -3437,11 +3454,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -3452,7 +3469,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3527,11 
+3544,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -3542,7 +3559,7 @@ def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3628,16 +3645,16 @@ def get_add_to_cart_rate_with_http_info( "Parameter `index` is required when calling `get_add_to_cart_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -3740,16 +3757,16 @@ def get_average_click_position_with_http_info( "Parameter `index` is required when calling `get_average_click_position`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -3852,16 +3869,16 @@ def get_click_positions_with_http_info( "Parameter `index` is required when calling `get_click_positions`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -3964,16 +3981,16 @@ def get_click_through_rate_with_http_info( "Parameter `index` is required when calling `get_click_through_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4076,16 +4093,16 @@ def get_conversion_rate_with_http_info( "Parameter `index` is required when calling `get_conversion_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4188,16 +4205,16 @@ def get_no_click_rate_with_http_info( "Parameter `index` is required when calling `get_no_click_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4300,16 +4317,16 @@ def get_no_results_rate_with_http_info( "Parameter `index` is required when calling `get_no_results_rate`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4412,16 +4429,16 @@ def get_purchase_rate_with_http_info( "Parameter `index` is required when calling `get_purchase_rate`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4524,16 +4541,16 @@ def get_revenue_with_http_info( "Parameter `index` is required when calling `get_revenue`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4636,16 +4653,16 @@ def get_searches_count_with_http_info( "Parameter `index` is required when calling `get_searches_count`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4764,20 +4781,20 @@ def get_searches_no_clicks_with_http_info( "Parameter `index` is required when calling `get_searches_no_clicks`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -4912,20 +4929,20 @@ def get_searches_no_results_with_http_info( "Parameter `index` is required when calling `get_searches_no_results`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5018,10 +5035,10 @@ def get_status_with_http_info( if index is None: raise ValueError("Parameter `index` is required when calling `get_status`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index return self._transporter.request( verb=Verb.GET, @@ -5114,20 +5131,20 @@ def get_top_countries_with_http_info( "Parameter `index` is required when calling `get_top_countries`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5267,22 +5284,22 @@ def get_top_filter_attributes_with_http_info( "Parameter `index` is required when calling `get_top_filter_attributes`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5435,22 +5452,22 @@ def get_top_filter_for_attribute_with_http_info( "Parameter `index` is required when calling 
`get_top_filter_for_attribute`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5608,22 +5625,22 @@ def get_top_filters_no_results_with_http_info( "Parameter `index` is required when calling `get_top_filters_no_results`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5784,26 +5801,26 @@ def get_top_hits_with_http_info( "Parameter `index` is required when calling `get_top_hits`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if search is not None: - _query_parameters.append(("search", search)) + _query_parameters["search"] = search if click_analytics is not None: - _query_parameters.append(("clickAnalytics", click_analytics)) + _query_parameters["clickAnalytics"] = click_analytics if revenue_analytics is not None: - _query_parameters.append(("revenueAnalytics", revenue_analytics)) + _query_parameters["revenueAnalytics"] = revenue_analytics if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -5933,17 +5950,23 @@ def get_top_searches_with_http_info( description="End date of the period to analyze, in `YYYY-MM-DD` format." ), ] = None, - order_by: Annotated[ - Optional[OrderBy], - Field( - description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " - ), - ] = None, - direction: Annotated[ - Optional[Direction], - Field( - description="Sorting direction of the results: ascending or descending. " - ), + order_by: Union[ + Annotated[ + Optional[OrderBy], + Field( + description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. 
" + ), + ], + str, + ] = None, + direction: Union[ + Annotated[ + Optional[Direction], + Field( + description="Sorting direction of the results: ascending or descending. " + ), + ], + str, ] = None, limit: Annotated[ Optional[Annotated[int, Field(le=1000, strict=True)]], @@ -6000,28 +6023,28 @@ def get_top_searches_with_http_info( "Parameter `index` is required when calling `get_top_searches`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if click_analytics is not None: - _query_parameters.append(("clickAnalytics", click_analytics)) + _query_parameters["clickAnalytics"] = click_analytics if revenue_analytics is not None: - _query_parameters.append(("revenueAnalytics", revenue_analytics)) + _query_parameters["revenueAnalytics"] = revenue_analytics if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if order_by is not None: - _query_parameters.append(("orderBy", order_by)) + _query_parameters["orderBy"] = order_by if direction is not None: - _query_parameters.append(("direction", direction)) + _query_parameters["direction"] = direction if limit is not None: - _query_parameters.append(("limit", limit)) + _query_parameters["limit"] = limit if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, @@ -6060,17 +6083,23 @@ def get_top_searches( description="End date of the period to analyze, in `YYYY-MM-DD` format." 
), ] = None, - order_by: Annotated[ - Optional[OrderBy], - Field( - description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " - ), - ] = None, - direction: Annotated[ - Optional[Direction], - Field( - description="Sorting direction of the results: ascending or descending. " - ), + order_by: Union[ + Annotated[ + Optional[OrderBy], + Field( + description="Attribute by which to order the response items. If the `clickAnalytics` parameter is false, only `searchCount` is available. " + ), + ], + str, + ] = None, + direction: Union[ + Annotated[ + Optional[Direction], + Field( + description="Sorting direction of the results: ascending or descending. " + ), + ], + str, ] = None, limit: Annotated[ Optional[Annotated[int, Field(le=1000, strict=True)]], @@ -6182,16 +6211,16 @@ def get_users_count_with_http_info( "Parameter `index` is required when calling `get_users_count`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if index is not None: - _query_parameters.append(("index", index)) + _query_parameters["index"] = index if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date if tags is not None: - _query_parameters.append(("tags", tags)) + _query_parameters["tags"] = tags return self._transporter.request( verb=Verb.GET, diff --git a/algoliasearch/analytics/config.py b/algoliasearch/analytics/config.py index eb8439920..438652387 100644 --- a/algoliasearch/analytics/config.py +++ b/algoliasearch/analytics/config.py @@ -7,11 +7,19 @@ class AnalyticsConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> None: + def __init__( + self, + app_id: Optional[str], + api_key: Optional[str], + region: Optional[str] = None, + ) -> 
None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Analytics") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." + self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -42,7 +50,9 @@ def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> N Host( "analytics.algolia.com" if region is None - else "analytics.{region}.algolia.com".replace("{region}", region) + else "analytics.{region}.algolia.com".replace( + "{region}", region or "" + ) ) ] ) diff --git a/algoliasearch/analytics/models/click_position.py b/algoliasearch/analytics/models/click_position.py index e2d67f5f5..ab847e91e 100644 --- a/algoliasearch/analytics/models/click_position.py +++ b/algoliasearch/analytics/models/click_position.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "position": "position", + "click_count": "clickCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ClickPosition(BaseModel): """ Click position. """ - position: Optional[List[int]] = Field(default=None, alias="position") + position: Optional[List[int]] = None """ Range of positions in the search results, using the pattern `[start,end]`. For positions 11 and up, click events are summed over the specified range. `-1` indicates the end of the list of search results. """ - click_count: Optional[int] = Field(default=None, alias="clickCount") + click_count: Optional[int] = None """ Number of times this search has been clicked at that position. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class ClickPosition(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/currency_code.py b/algoliasearch/analytics/models/currency_code.py index e5f6d02b8..721e0b335 100644 --- a/algoliasearch/analytics/models/currency_code.py +++ b/algoliasearch/analytics/models/currency_code.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "currency": "currency", + "revenue": "revenue", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class CurrencyCode(BaseModel): """ Currency code. """ - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Currency code. """ - revenue: Optional[float] = Field(default=None, alias="revenue") + revenue: Optional[float] = None """ Revenue associated with this search in this currency. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class CurrencyCode(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_add_to_cart_rates.py b/algoliasearch/analytics/models/daily_add_to_cart_rates.py index 7cc1999bb..d198e1535 100644 --- a/algoliasearch/analytics/models/daily_add_to_cart_rates.py +++ b/algoliasearch/analytics/models/daily_add_to_cart_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "add_to_cart_count": "addToCartCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyAddToCartRates(BaseModel): """ DailyAddToCartRates """ - rate: float = Field(alias="rate") + rate: float """ Add-to-cart rate, calculated as number of tracked searches with at least one add-to-cart event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - add_to_cart_count: int = Field(alias="addToCartCount") + add_to_cart_count: int """ Number of add-to-cart events from this search. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyAddToCartRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_average_clicks.py b/algoliasearch/analytics/models/daily_average_clicks.py index ef5dfa73b..edfa0d747 100644 --- a/algoliasearch/analytics/models/daily_average_clicks.py +++ b/algoliasearch/analytics/models/daily_average_clicks.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "average": "average", + "click_count": "clickCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyAverageClicks(BaseModel): """ DailyAverageClicks """ - average: float = Field(alias="average") + average: float """ Average position of a clicked search result in the list of search results. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class DailyAverageClicks(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_click_through_rates.py b/algoliasearch/analytics/models/daily_click_through_rates.py index 648d5e333..f99ea6526 100644 --- a/algoliasearch/analytics/models/daily_click_through_rates.py +++ b/algoliasearch/analytics/models/daily_click_through_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "rate": "rate", + "click_count": "clickCount", + "tracked_search_count": "trackedSearchCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyClickThroughRates(BaseModel): """ DailyClickThroughRates """ - rate: float = Field(alias="rate") + rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyClickThroughRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_conversion_rates.py b/algoliasearch/analytics/models/daily_conversion_rates.py index 3d32b010a..6f97d09da 100644 --- a/algoliasearch/analytics/models/daily_conversion_rates.py +++ b/algoliasearch/analytics/models/daily_conversion_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "conversion_count": "conversionCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyConversionRates(BaseModel): """ DailyConversionRates """ - rate: float = Field(alias="rate") + rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyConversionRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_no_click_rates.py b/algoliasearch/analytics/models/daily_no_click_rates.py index 76a2f04a8..c8038d81d 100644 --- a/algoliasearch/analytics/models/daily_no_click_rates.py +++ b/algoliasearch/analytics/models/daily_no_click_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "rate": "rate", + "count": "count", + "no_click_count": "noClickCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyNoClickRates(BaseModel): """ DailyNoClickRates """ - rate: float = Field(alias="rate") + rate: float """ No click rate, calculated as number of tracked searches without any click divided by the number of tracked searches. """ - count: int = Field(alias="count") + count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - no_click_count: int = Field(alias="noClickCount") + no_click_count: int """ Number of times this search was returned as a result without any click. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyNoClickRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_no_results_rates.py b/algoliasearch/analytics/models/daily_no_results_rates.py index 52d546adf..e2fb43f25 100644 --- a/algoliasearch/analytics/models/daily_no_results_rates.py +++ b/algoliasearch/analytics/models/daily_no_results_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "var_date": "date", + "no_result_count": "noResultCount", + "count": "count", + "rate": "rate", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyNoResultsRates(BaseModel): """ DailyNoResultsRates """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. """ - no_result_count: int = Field(alias="noResultCount") + no_result_count: int """ Number of searches without any results. """ - count: int = Field(alias="count") + count: int """ Number of searches. """ - rate: float = Field(alias="rate") + rate: float """ No results rate, calculated as number of searches with zero results divided by the total number of searches. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyNoResultsRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_purchase_rates.py b/algoliasearch/analytics/models/daily_purchase_rates.py index 5a5398a82..9ca5d9516 100644 --- a/algoliasearch/analytics/models/daily_purchase_rates.py +++ b/algoliasearch/analytics/models/daily_purchase_rates.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "purchase_count": "purchaseCount", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyPurchaseRates(BaseModel): """ DailyPurchaseRates """ - rate: float = Field(alias="rate") + rate: float """ Purchase rate, calculated as number of tracked searches with at least one purchase event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - purchase_count: int = Field(alias="purchaseCount") + purchase_count: int """ Number of purchase events from this search. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class DailyPurchaseRates(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_revenue.py b/algoliasearch/analytics/models/daily_revenue.py index 1b52eca70..eff63a85f 100644 --- a/algoliasearch/analytics/models/daily_revenue.py +++ b/algoliasearch/analytics/models/daily_revenue.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.analytics.models.currency_code import CurrencyCode +_ALIASES = { + "currencies": "currencies", + "var_date": "date", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DailyRevenue(BaseModel): """ DailyRevenue """ - currencies: Dict[str, CurrencyCode] = Field(alias="currencies") + currencies: Dict[str, CurrencyCode] """ Revenue associated with this search, broken-down by currencies. """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class DailyRevenue(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_searches.py b/algoliasearch/analytics/models/daily_searches.py index bb243dda7..e71ec16f1 100644 --- a/algoliasearch/analytics/models/daily_searches.py +++ b/algoliasearch/analytics/models/daily_searches.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_date": "date", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailySearches(BaseModel): """ DailySearches """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class DailySearches(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_searches_no_clicks.py b/algoliasearch/analytics/models/daily_searches_no_clicks.py index 58d89a344..e1061042c 100644 --- a/algoliasearch/analytics/models/daily_searches_no_clicks.py +++ b/algoliasearch/analytics/models/daily_searches_no_clicks.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "search": "search", + "count": "count", + "nb_hits": "nbHits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailySearchesNoClicks(BaseModel): """ DailySearchesNoClicks """ - search: str = Field(alias="search") + search: str """ Search query. """ - count: int = Field(alias="count") + count: int """ Number of tracked searches. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class DailySearchesNoClicks(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_searches_no_results.py b/algoliasearch/analytics/models/daily_searches_no_results.py index 14cbb2b06..6f9a1251e 100644 --- a/algoliasearch/analytics/models/daily_searches_no_results.py +++ b/algoliasearch/analytics/models/daily_searches_no_results.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "search": "search", + "count": "count", + "with_filter_count": "withFilterCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailySearchesNoResults(BaseModel): """ DailySearchesNoResults """ - search: str = Field(alias="search") + search: str """ Search query. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. """ - with_filter_count: int = Field(alias="withFilterCount") + with_filter_count: int """ Number of searches for this term with applied filters. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class DailySearchesNoResults(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/daily_users.py b/algoliasearch/analytics/models/daily_users.py index f79a7e727..54a614e7f 100644 --- a/algoliasearch/analytics/models/daily_users.py +++ b/algoliasearch/analytics/models/daily_users.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_date": "date", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DailyUsers(BaseModel): """ DailyUsers """ - var_date: str = Field(alias="date") + var_date: str """ Date in the format YYYY-MM-DD. """ - count: int = Field(alias="count") + count: int """ Number of unique users. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class DailyUsers(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/error_base.py b/algoliasearch/analytics/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/analytics/models/error_base.py +++ b/algoliasearch/analytics/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/analytics/models/get_add_to_cart_rate_response.py b/algoliasearch/analytics/models/get_add_to_cart_rate_response.py index 97e376593..08aad73f1 100644 --- a/algoliasearch/analytics/models/get_add_to_cart_rate_response.py +++ b/algoliasearch/analytics/models/get_add_to_cart_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.analytics.models.daily_add_to_cart_rates import DailyAddToCartRates +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "add_to_cart_count": "addToCartCount", + "dates": "dates", +} + + +def 
_alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetAddToCartRateResponse(BaseModel): """ GetAddToCartRateResponse """ - rate: float = Field(alias="rate") + rate: float """ Add-to-cart rate, calculated as number of tracked searches with at least one add-to-cart event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - add_to_cart_count: int = Field(alias="addToCartCount") + add_to_cart_count: int """ Number of add-to-cart events from this search. """ - dates: List[DailyAddToCartRates] = Field(alias="dates") + dates: List[DailyAddToCartRates] """ Daily add-to-cart rates. """ model_config = ConfigDict( @@ -40,6 +51,7 @@ class GetAddToCartRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_average_click_position_response.py b/algoliasearch/analytics/models/get_average_click_position_response.py index e229417aa..41f3eb776 100644 --- a/algoliasearch/analytics/models/get_average_click_position_response.py +++ b/algoliasearch/analytics/models/get_average_click_position_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,17 +20,27 @@ from algoliasearch.analytics.models.daily_average_clicks import DailyAverageClicks +_ALIASES = { + "average": "average", + "click_count": "clickCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class 
GetAverageClickPositionResponse(BaseModel): """ GetAverageClickPositionResponse """ - average: float = Field(alias="average") + average: float """ Average position of a clicked search result in the list of search results. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - dates: List[DailyAverageClicks] = Field(alias="dates") + dates: List[DailyAverageClicks] """ Daily average click positions. """ model_config = ConfigDict( @@ -38,6 +48,7 @@ class GetAverageClickPositionResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_click_positions_response.py b/algoliasearch/analytics/models/get_click_positions_response.py index 21908c479..2dc39f67f 100644 --- a/algoliasearch/analytics/models/get_click_positions_response.py +++ b/algoliasearch/analytics/models/get_click_positions_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.analytics.models.click_position import ClickPosition +_ALIASES = { + "positions": "positions", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetClickPositionsResponse(BaseModel): """ GetClickPositionsResponse """ - positions: List[ClickPosition] = Field(alias="positions") + positions: List[ClickPosition] """ List of positions in the search results and clicks associated with this search. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class GetClickPositionsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_click_through_rate_response.py b/algoliasearch/analytics/models/get_click_through_rate_response.py index d2358b31a..7027ac2fb 100644 --- a/algoliasearch/analytics/models/get_click_through_rate_response.py +++ b/algoliasearch/analytics/models/get_click_through_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,19 +22,30 @@ DailyClickThroughRates, ) +_ALIASES = { + "rate": "rate", + "click_count": "clickCount", + "tracked_search_count": "trackedSearchCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetClickThroughRateResponse(BaseModel): """ GetClickThroughRateResponse """ - rate: float = Field(alias="rate") + rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - dates: List[DailyClickThroughRates] = Field(alias="dates") + dates: List[DailyClickThroughRates] """ Daily click-through rates. 
""" model_config = ConfigDict( @@ -42,6 +53,7 @@ class GetClickThroughRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_conversion_rate_response.py b/algoliasearch/analytics/models/get_conversion_rate_response.py index d3bf1881d..280b6c5d3 100644 --- a/algoliasearch/analytics/models/get_conversion_rate_response.py +++ b/algoliasearch/analytics/models/get_conversion_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.analytics.models.daily_conversion_rates import DailyConversionRates +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "conversion_count": "conversionCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetConversionRateResponse(BaseModel): """ GetConversionRateResponse """ - rate: float = Field(alias="rate") + rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ - dates: List[DailyConversionRates] = Field(alias="dates") + dates: List[DailyConversionRates] """ Daily conversion rates. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class GetConversionRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_no_click_rate_response.py b/algoliasearch/analytics/models/get_no_click_rate_response.py index fb0b473ec..7e8a9ff90 100644 --- a/algoliasearch/analytics/models/get_no_click_rate_response.py +++ b/algoliasearch/analytics/models/get_no_click_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.analytics.models.daily_no_click_rates import DailyNoClickRates +_ALIASES = { + "rate": "rate", + "count": "count", + "no_click_count": "noClickCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetNoClickRateResponse(BaseModel): """ GetNoClickRateResponse """ - rate: float = Field(alias="rate") + rate: float """ No click rate, calculated as number of tracked searches without any click divided by the number of tracked searches. """ - count: int = Field(alias="count") + count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - no_click_count: int = Field(alias="noClickCount") + no_click_count: int """ Number of times this search was returned as a result without any click. """ - dates: List[DailyNoClickRates] = Field(alias="dates") + dates: List[DailyNoClickRates] """ Daily no click rates. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class GetNoClickRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_no_results_rate_response.py b/algoliasearch/analytics/models/get_no_results_rate_response.py index d302738b7..349456898 100644 --- a/algoliasearch/analytics/models/get_no_results_rate_response.py +++ b/algoliasearch/analytics/models/get_no_results_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.analytics.models.daily_no_results_rates import DailyNoResultsRates +_ALIASES = { + "rate": "rate", + "count": "count", + "no_result_count": "noResultCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetNoResultsRateResponse(BaseModel): """ GetNoResultsRateResponse """ - rate: float = Field(alias="rate") + rate: float """ No results rate, calculated as number of searches with zero results divided by the total number of searches. """ - count: int = Field(alias="count") + count: int """ Number of searches. """ - no_result_count: int = Field(alias="noResultCount") + no_result_count: int """ Number of searches without any results. """ - dates: List[DailyNoResultsRates] = Field(alias="dates") + dates: List[DailyNoResultsRates] """ Daily no results rates. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class GetNoResultsRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_purchase_rate_response.py b/algoliasearch/analytics/models/get_purchase_rate_response.py index c73a0fa0d..a2881e3d6 100644 --- a/algoliasearch/analytics/models/get_purchase_rate_response.py +++ b/algoliasearch/analytics/models/get_purchase_rate_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.analytics.models.daily_purchase_rates import DailyPurchaseRates +_ALIASES = { + "rate": "rate", + "tracked_search_count": "trackedSearchCount", + "purchase_count": "purchaseCount", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetPurchaseRateResponse(BaseModel): """ GetPurchaseRateResponse """ - rate: float = Field(alias="rate") + rate: float """ Purchase rate, calculated as number of tracked searches with at least one purchase event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - purchase_count: int = Field(alias="purchaseCount") + purchase_count: int """ Number of purchase events from this search. """ - dates: List[DailyPurchaseRates] = Field(alias="dates") + dates: List[DailyPurchaseRates] """ Daily purchase rates. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class GetPurchaseRateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_revenue.py b/algoliasearch/analytics/models/get_revenue.py index cda71e22f..9a8b5f2e4 100644 --- a/algoliasearch/analytics/models/get_revenue.py +++ b/algoliasearch/analytics/models/get_revenue.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,15 +21,24 @@ from algoliasearch.analytics.models.currency_code import CurrencyCode from algoliasearch.analytics.models.daily_revenue import DailyRevenue +_ALIASES = { + "currencies": "currencies", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetRevenue(BaseModel): """ GetRevenue """ - currencies: Dict[str, CurrencyCode] = Field(alias="currencies") + currencies: Dict[str, CurrencyCode] """ Revenue associated with this search, broken-down by currencies. """ - dates: List[DailyRevenue] = Field(alias="dates") + dates: List[DailyRevenue] """ Daily revenue. 
""" model_config = ConfigDict( @@ -37,6 +46,7 @@ class GetRevenue(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_searches_count_response.py b/algoliasearch/analytics/models/get_searches_count_response.py index 2d1e3d9ec..24ee7d03b 100644 --- a/algoliasearch/analytics/models/get_searches_count_response.py +++ b/algoliasearch/analytics/models/get_searches_count_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.analytics.models.daily_searches import DailySearches +_ALIASES = { + "count": "count", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetSearchesCountResponse(BaseModel): """ GetSearchesCountResponse """ - count: int = Field(alias="count") + count: int """ Number of occurrences. """ - dates: List[DailySearches] = Field(alias="dates") + dates: List[DailySearches] """ Daily number of searches. 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class GetSearchesCountResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_searches_no_clicks_response.py b/algoliasearch/analytics/models/get_searches_no_clicks_response.py index 6aac28d9c..5652ee92d 100644 --- a/algoliasearch/analytics/models/get_searches_no_clicks_response.py +++ b/algoliasearch/analytics/models/get_searches_no_clicks_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ DailySearchesNoClicks, ) +_ALIASES = { + "searches": "searches", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetSearchesNoClicksResponse(BaseModel): """ GetSearchesNoClicksResponse """ - searches: List[DailySearchesNoClicks] = Field(alias="searches") + searches: List[DailySearchesNoClicks] """ Searches without any clicks. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetSearchesNoClicksResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_searches_no_results_response.py b/algoliasearch/analytics/models/get_searches_no_results_response.py index 69950be9d..3ea836a83 100644 --- a/algoliasearch/analytics/models/get_searches_no_results_response.py +++ b/algoliasearch/analytics/models/get_searches_no_results_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ DailySearchesNoResults, ) +_ALIASES = { + "searches": "searches", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetSearchesNoResultsResponse(BaseModel): """ GetSearchesNoResultsResponse """ - searches: List[DailySearchesNoResults] = Field(alias="searches") + searches: List[DailySearchesNoResults] """ Searches without results. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetSearchesNoResultsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_status_response.py b/algoliasearch/analytics/models/get_status_response.py index 558e35d9c..68bb07b7b 100644 --- a/algoliasearch/analytics/models/get_status_response.py +++ b/algoliasearch/analytics/models/get_status_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class GetStatusResponse(BaseModel): """ GetStatusResponse """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class GetStatusResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_countries_response.py b/algoliasearch/analytics/models/get_top_countries_response.py index 200b13ec4..9470900ea 100644 --- a/algoliasearch/analytics/models/get_top_countries_response.py +++ b/algoliasearch/analytics/models/get_top_countries_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.analytics.models.top_country import TopCountry +_ALIASES = { + "countries": "countries", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopCountriesResponse(BaseModel): """ GetTopCountriesResponse """ - countries: List[TopCountry] = Field(alias="countries") + countries: List[TopCountry] """ Countries and number of searches. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class GetTopCountriesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filter_attribute.py b/algoliasearch/analytics/models/get_top_filter_attribute.py index d4e66a4b4..1d722507f 100644 --- a/algoliasearch/analytics/models/get_top_filter_attribute.py +++ b/algoliasearch/analytics/models/get_top_filter_attribute.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "attribute": "attribute", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class GetTopFilterAttribute(BaseModel): """ GetTopFilterAttribute """ - attribute: str = Field(alias="attribute") + attribute: str """ Attribute name. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class GetTopFilterAttribute(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filter_attributes_response.py b/algoliasearch/analytics/models/get_top_filter_attributes_response.py index 2ac563a7b..51736d78c 100644 --- a/algoliasearch/analytics/models/get_top_filter_attributes_response.py +++ b/algoliasearch/analytics/models/get_top_filter_attributes_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ GetTopFilterAttribute, ) +_ALIASES = { + "attributes": "attributes", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFilterAttributesResponse(BaseModel): """ GetTopFilterAttributesResponse """ - attributes: List[GetTopFilterAttribute] = Field(alias="attributes") + attributes: List[GetTopFilterAttribute] """ Most frequent filters. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetTopFilterAttributesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filter_for_attribute.py b/algoliasearch/analytics/models/get_top_filter_for_attribute.py index 1b7558075..13203dfa7 100644 --- a/algoliasearch/analytics/models/get_top_filter_for_attribute.py +++ b/algoliasearch/analytics/models/get_top_filter_for_attribute.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.analytics.models.operator import Operator +_ALIASES = { + "attribute": "attribute", + "operator": "operator", + "value": "value", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFilterForAttribute(BaseModel): """ GetTopFilterForAttribute """ - attribute: str = Field(alias="attribute") + attribute: str """ Attribute name. """ - operator: Operator = Field(alias="operator") - value: str = Field(alias="value") + operator: Operator + value: str """ Attribute value. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. 
""" model_config = ConfigDict( @@ -39,6 +50,7 @@ class GetTopFilterForAttribute(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filter_for_attribute_response.py b/algoliasearch/analytics/models/get_top_filter_for_attribute_response.py index ae2b3dd53..59d8198da 100644 --- a/algoliasearch/analytics/models/get_top_filter_for_attribute_response.py +++ b/algoliasearch/analytics/models/get_top_filter_for_attribute_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ GetTopFilterForAttribute, ) +_ALIASES = { + "values": "values", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFilterForAttributeResponse(BaseModel): """ GetTopFilterForAttributeResponse """ - values: List[GetTopFilterForAttribute] = Field(alias="values") + values: List[GetTopFilterForAttribute] """ Filter values for an attribute. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetTopFilterForAttributeResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filters_no_results_response.py b/algoliasearch/analytics/models/get_top_filters_no_results_response.py index 912db29ea..6d53c3ba7 100644 --- a/algoliasearch/analytics/models/get_top_filters_no_results_response.py +++ b/algoliasearch/analytics/models/get_top_filters_no_results_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ GetTopFiltersNoResultsValues, ) +_ALIASES = { + "values": "values", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFiltersNoResultsResponse(BaseModel): """ GetTopFiltersNoResultsResponse """ - values: List[GetTopFiltersNoResultsValues] = Field(alias="values") + values: List[GetTopFiltersNoResultsValues] """ Filters for searches without any results. If null, the search term specified with the `search` parameter is not a search without results, or the `search` parameter is absent from the request. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetTopFiltersNoResultsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filters_no_results_value.py b/algoliasearch/analytics/models/get_top_filters_no_results_value.py index f9b93e8f7..5a88fa8dd 100644 --- a/algoliasearch/analytics/models/get_top_filters_no_results_value.py +++ b/algoliasearch/analytics/models/get_top_filters_no_results_value.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,16 +20,26 @@ from algoliasearch.analytics.models.operator import Operator +_ALIASES = { + "attribute": "attribute", + "operator": "operator", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFiltersNoResultsValue(BaseModel): """ GetTopFiltersNoResultsValue """ - attribute: str = Field(alias="attribute") + attribute: str """ Attribute name. """ - operator: Operator = Field(alias="operator") - value: str = Field(alias="value") + operator: Operator + value: str """ Attribute value. 
""" model_config = ConfigDict( @@ -37,6 +47,7 @@ class GetTopFiltersNoResultsValue(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_filters_no_results_values.py b/algoliasearch/analytics/models/get_top_filters_no_results_values.py index 737c0f196..20a1cacb0 100644 --- a/algoliasearch/analytics/models/get_top_filters_no_results_values.py +++ b/algoliasearch/analytics/models/get_top_filters_no_results_values.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,15 +22,24 @@ GetTopFiltersNoResultsValue, ) +_ALIASES = { + "count": "count", + "values": "values", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopFiltersNoResultsValues(BaseModel): """ GetTopFiltersNoResultsValues """ - count: int = Field(alias="count") + count: int """ Number of occurrences. """ - values: List[GetTopFiltersNoResultsValue] = Field(alias="values") + values: List[GetTopFiltersNoResultsValue] """ Filters with no results. 
""" model_config = ConfigDict( @@ -38,6 +47,7 @@ class GetTopFiltersNoResultsValues(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/get_top_hits_response.py b/algoliasearch/analytics/models/get_top_hits_response.py index b8e3798c6..0c48fb25b 100644 --- a/algoliasearch/analytics/models/get_top_hits_response.py +++ b/algoliasearch/analytics/models/get_top_hits_response.py @@ -42,12 +42,11 @@ class GetTopHitsResponse(BaseModel): default=None ) - actual_instance: Optional[ - Union[ - TopHitsResponse, - TopHitsResponseWithAnalytics, - TopHitsResponseWithRevenueAnalytics, - ] + actual_instance: Union[ + TopHitsResponse, + TopHitsResponseWithAnalytics, + TopHitsResponseWithRevenueAnalytics, + None, ] = None one_of_schemas: Set[str] = { "TopHitsResponse", @@ -65,19 +64,19 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - TopHitsResponse, - TopHitsResponseWithAnalytics, - TopHitsResponseWithRevenueAnalytics, - ] + ) -> Union[ + TopHitsResponse, + TopHitsResponseWithAnalytics, + TopHitsResponseWithRevenueAnalytics, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -127,9 +126,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -148,8 +147,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/analytics/models/get_top_searches_response.py b/algoliasearch/analytics/models/get_top_searches_response.py index e4d11b28b..58f7efe71 100644 --- a/algoliasearch/analytics/models/get_top_searches_response.py +++ b/algoliasearch/analytics/models/get_top_searches_response.py @@ -42,12 +42,11 @@ class GetTopSearchesResponse(BaseModel): default=None ) - actual_instance: Optional[ - Union[ - TopSearchesResponse, - TopSearchesResponseWithAnalytics, - TopSearchesResponseWithRevenueAnalytics, - ] + actual_instance: Union[ + TopSearchesResponse, + TopSearchesResponseWithAnalytics, + TopSearchesResponseWithRevenueAnalytics, + None, ] = None one_of_schemas: Set[str] = { "TopSearchesResponse", @@ -65,19 +64,19 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - TopSearchesResponse, - TopSearchesResponseWithAnalytics, - TopSearchesResponseWithRevenueAnalytics, - ] + ) -> Union[ + TopSearchesResponse, + TopSearchesResponseWithAnalytics, + TopSearchesResponseWithRevenueAnalytics, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. @@ -129,9 +128,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -150,8 +149,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/analytics/models/get_users_count_response.py b/algoliasearch/analytics/models/get_users_count_response.py index d7bd8e473..63fa29512 100644 --- a/algoliasearch/analytics/models/get_users_count_response.py +++ b/algoliasearch/analytics/models/get_users_count_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.analytics.models.daily_users import DailyUsers +_ALIASES = { + "count": "count", + "dates": "dates", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + 
class GetUsersCountResponse(BaseModel): """ GetUsersCountResponse """ - count: int = Field(alias="count") + count: int """ Number of unique users. """ - dates: List[DailyUsers] = Field(alias="dates") + dates: List[DailyUsers] """ Daily number of unique users. """ model_config = ConfigDict( @@ -36,6 +45,7 @@ class GetUsersCountResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_country.py b/algoliasearch/analytics/models/top_country.py index f8fbccb9a..d1e04550b 100644 --- a/algoliasearch/analytics/models/top_country.py +++ b/algoliasearch/analytics/models/top_country.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "country": "country", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TopCountry(BaseModel): """ TopCountry """ - country: str = Field(alias="country") + country: str """ Country code. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TopCountry(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hit.py b/algoliasearch/analytics/models/top_hit.py index c035e446b..93bc3aeba 100644 --- a/algoliasearch/analytics/models/top_hit.py +++ b/algoliasearch/analytics/models/top_hit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "hit": "hit", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TopHit(BaseModel): """ TopHit """ - hit: str = Field(alias="hit") + hit: str """ Object ID of a record that's returned as a search result. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TopHit(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hit_with_analytics.py b/algoliasearch/analytics/models/top_hit_with_analytics.py index 64b5ecf25..af0977d02 100644 --- a/algoliasearch/analytics/models/top_hit_with_analytics.py +++ b/algoliasearch/analytics/models/top_hit_with_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,24 +18,39 @@ from typing_extensions import Self +_ALIASES = { + "hit": "hit", + "count": "count", + "click_through_rate": "clickThroughRate", + "conversion_rate": "conversionRate", + "tracked_hit_count": "trackedHitCount", + "click_count": "clickCount", + "conversion_count": "conversionCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TopHitWithAnalytics(BaseModel): """ TopHitWithAnalytics """ - hit: str = Field(alias="hit") + hit: str """ Object ID of a record that's returned as a search result. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. """ - click_through_rate: float = Field(alias="clickThroughRate") + click_through_rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - conversion_rate: float = Field(alias="conversionRate") + conversion_rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. 
If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_hit_count: int = Field(alias="trackedHitCount") + tracked_hit_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ model_config = ConfigDict( @@ -43,6 +58,7 @@ class TopHitWithAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hit_with_revenue_analytics.py b/algoliasearch/analytics/models/top_hit_with_revenue_analytics.py index a166649c2..338a142ac 100644 --- a/algoliasearch/analytics/models/top_hit_with_revenue_analytics.py +++ b/algoliasearch/analytics/models/top_hit_with_revenue_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,35 +20,54 @@ from algoliasearch.analytics.models.currency_code import CurrencyCode +_ALIASES = { + "hit": "hit", + "count": "count", + "click_through_rate": "clickThroughRate", + "conversion_rate": "conversionRate", + "tracked_hit_count": "trackedHitCount", + "click_count": "clickCount", + "conversion_count": "conversionCount", + "add_to_cart_rate": "addToCartRate", + "add_to_cart_count": "addToCartCount", + "purchase_rate": "purchaseRate", + "purchase_count": "purchaseCount", + "currencies": "currencies", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopHitWithRevenueAnalytics(BaseModel): """ TopHitWithRevenueAnalytics """ 
- hit: str = Field(alias="hit") + hit: str """ Object ID of a record that's returned as a search result. """ - count: int = Field(alias="count") + count: int """ Number of occurrences. """ - click_through_rate: float = Field(alias="clickThroughRate") + click_through_rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - conversion_rate: float = Field(alias="conversionRate") + conversion_rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_hit_count: int = Field(alias="trackedHitCount") + tracked_hit_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ - add_to_cart_rate: float = Field(alias="addToCartRate") + add_to_cart_rate: float """ Add-to-cart rate, calculated as number of tracked searches with at least one add-to-cart event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - add_to_cart_count: int = Field(alias="addToCartCount") + add_to_cart_count: int """ Number of add-to-cart events from this search. """ - purchase_rate: float = Field(alias="purchaseRate") + purchase_rate: float """ Purchase rate, calculated as number of tracked searches with at least one purchase event divided by the number of tracked searches. 
If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - purchase_count: int = Field(alias="purchaseCount") + purchase_count: int """ Number of purchase events from this search. """ - currencies: Dict[str, CurrencyCode] = Field(alias="currencies") + currencies: Dict[str, CurrencyCode] """ Revenue associated with this search, broken-down by currencies. """ model_config = ConfigDict( @@ -56,6 +75,7 @@ class TopHitWithRevenueAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hits_response.py b/algoliasearch/analytics/models/top_hits_response.py index 8e2098b8d..a50823ea9 100644 --- a/algoliasearch/analytics/models/top_hits_response.py +++ b/algoliasearch/analytics/models/top_hits_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.analytics.models.top_hit import TopHit +_ALIASES = { + "hits": "hits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopHitsResponse(BaseModel): """ TopHitsResponse """ - hits: List[TopHit] = Field(alias="hits") + hits: List[TopHit] """ Most frequent search results. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class TopHitsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hits_response_with_analytics.py b/algoliasearch/analytics/models/top_hits_response_with_analytics.py index 499374ed3..d60155fe8 100644 --- a/algoliasearch/analytics/models/top_hits_response_with_analytics.py +++ b/algoliasearch/analytics/models/top_hits_response_with_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.analytics.models.top_hit_with_analytics import TopHitWithAnalytics +_ALIASES = { + "hits": "hits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopHitsResponseWithAnalytics(BaseModel): """ TopHitsResponseWithAnalytics """ - hits: List[TopHitWithAnalytics] = Field(alias="hits") + hits: List[TopHitWithAnalytics] """ Most frequent search results with click and conversion metrics. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class TopHitsResponseWithAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_hits_response_with_revenue_analytics.py b/algoliasearch/analytics/models/top_hits_response_with_revenue_analytics.py index dd77747c1..622f3566b 100644 --- a/algoliasearch/analytics/models/top_hits_response_with_revenue_analytics.py +++ b/algoliasearch/analytics/models/top_hits_response_with_revenue_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ TopHitWithRevenueAnalytics, ) +_ALIASES = { + "hits": "hits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopHitsResponseWithRevenueAnalytics(BaseModel): """ TopHitsResponseWithRevenueAnalytics """ - hits: List[TopHitWithRevenueAnalytics] = Field(alias="hits") + hits: List[TopHitWithRevenueAnalytics] """ Most frequent search results with click, conversion, and revenue metrics. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class TopHitsResponseWithRevenueAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_search.py b/algoliasearch/analytics/models/top_search.py index dd1ddb989..f210c4662 100644 --- a/algoliasearch/analytics/models/top_search.py +++ b/algoliasearch/analytics/models/top_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "search": "search", + "count": "count", + "nb_hits": "nbHits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TopSearch(BaseModel): """ TopSearch """ - search: str = Field(alias="search") + search: str """ Search query. """ - count: int = Field(alias="count") + count: int """ Number of searches. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class TopSearch(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_search_with_analytics.py b/algoliasearch/analytics/models/top_search_with_analytics.py index 7a2da7adf..8e654729d 100644 --- a/algoliasearch/analytics/models/top_search_with_analytics.py +++ b/algoliasearch/analytics/models/top_search_with_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,31 +20,48 @@ from algoliasearch.analytics.models.click_position import ClickPosition +_ALIASES = { + "search": "search", + "count": "count", + "click_through_rate": "clickThroughRate", + "average_click_position": "averageClickPosition", + "click_positions": "clickPositions", + "conversion_rate": "conversionRate", + "tracked_search_count": "trackedSearchCount", + "click_count": "clickCount", + "conversion_count": "conversionCount", + "nb_hits": "nbHits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopSearchWithAnalytics(BaseModel): """ TopSearchWithAnalytics """ - search: str = Field(alias="search") + search: str """ Search query. """ - count: int = Field(alias="count") + count: int """ Number of searches. """ - click_through_rate: float = Field(alias="clickThroughRate") + click_through_rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. 
""" - average_click_position: float = Field(alias="averageClickPosition") + average_click_position: float """ Average position of a clicked search result in the list of search results. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_positions: List[ClickPosition] = Field(alias="clickPositions") + click_positions: List[ClickPosition] """ List of positions in the search results and clicks associated with this search. """ - conversion_rate: float = Field(alias="conversionRate") + conversion_rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). 
""" model_config = ConfigDict( @@ -52,6 +69,7 @@ class TopSearchWithAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_search_with_revenue_analytics.py b/algoliasearch/analytics/models/top_search_with_revenue_analytics.py index 1b9f35cfc..83260cb08 100644 --- a/algoliasearch/analytics/models/top_search_with_revenue_analytics.py +++ b/algoliasearch/analytics/models/top_search_with_revenue_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,41 +21,63 @@ from algoliasearch.analytics.models.click_position import ClickPosition from algoliasearch.analytics.models.currency_code import CurrencyCode +_ALIASES = { + "search": "search", + "count": "count", + "click_through_rate": "clickThroughRate", + "average_click_position": "averageClickPosition", + "click_positions": "clickPositions", + "conversion_rate": "conversionRate", + "tracked_search_count": "trackedSearchCount", + "click_count": "clickCount", + "conversion_count": "conversionCount", + "nb_hits": "nbHits", + "currencies": "currencies", + "add_to_cart_rate": "addToCartRate", + "add_to_cart_count": "addToCartCount", + "purchase_rate": "purchaseRate", + "purchase_count": "purchaseCount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopSearchWithRevenueAnalytics(BaseModel): """ TopSearchWithRevenueAnalytics """ - search: str = Field(alias="search") + search: str """ Search query. """ - count: int = Field(alias="count") + count: int """ Number of searches. 
""" - click_through_rate: float = Field(alias="clickThroughRate") + click_through_rate: float """ Click-through rate, calculated as number of tracked searches with at least one click event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - average_click_position: float = Field(alias="averageClickPosition") + average_click_position: float """ Average position of a clicked search result in the list of search results. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - click_positions: List[ClickPosition] = Field(alias="clickPositions") + click_positions: List[ClickPosition] """ List of positions in the search results and clicks associated with this search. """ - conversion_rate: float = Field(alias="conversionRate") + conversion_rate: float """ Conversion rate, calculated as number of tracked searches with at least one conversion event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - tracked_search_count: int = Field(alias="trackedSearchCount") + tracked_search_count: int """ Number of tracked searches. Tracked searches are search requests where the `clickAnalytics` parameter is true. """ - click_count: int = Field(alias="clickCount") + click_count: int """ Number of clicks associated with this search. """ - conversion_count: int = Field(alias="conversionCount") + conversion_count: int """ Number of conversions from this search. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). """ - currencies: Dict[str, CurrencyCode] = Field(alias="currencies") + currencies: Dict[str, CurrencyCode] """ Revenue associated with this search, broken-down by currencies. 
""" - add_to_cart_rate: float = Field(alias="addToCartRate") + add_to_cart_rate: float """ Add-to-cart rate, calculated as number of tracked searches with at least one add-to-cart event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - add_to_cart_count: int = Field(alias="addToCartCount") + add_to_cart_count: int """ Number of add-to-cart events from this search. """ - purchase_rate: float = Field(alias="purchaseRate") + purchase_rate: float """ Purchase rate, calculated as number of tracked searches with at least one purchase event divided by the number of tracked searches. If null, Algolia didn't receive any search requests with `clickAnalytics` set to true. """ - purchase_count: int = Field(alias="purchaseCount") + purchase_count: int """ Number of purchase events from this search. """ model_config = ConfigDict( @@ -63,6 +85,7 @@ class TopSearchWithRevenueAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_searches_response.py b/algoliasearch/analytics/models/top_searches_response.py index 193775b9b..3bcc0868e 100644 --- a/algoliasearch/analytics/models/top_searches_response.py +++ b/algoliasearch/analytics/models/top_searches_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.analytics.models.top_search import TopSearch +_ALIASES = { + "searches": "searches", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopSearchesResponse(BaseModel): """ TopSearchesResponse """ - searches: List[TopSearch] = Field(alias="searches") + searches: List[TopSearch] 
""" Most popular searches and their number of search results (hits). """ model_config = ConfigDict( @@ -34,6 +42,7 @@ class TopSearchesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_searches_response_with_analytics.py b/algoliasearch/analytics/models/top_searches_response_with_analytics.py index 1ebf8a523..e80fbae6e 100644 --- a/algoliasearch/analytics/models/top_searches_response_with_analytics.py +++ b/algoliasearch/analytics/models/top_searches_response_with_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ TopSearchWithAnalytics, ) +_ALIASES = { + "searches": "searches", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopSearchesResponseWithAnalytics(BaseModel): """ TopSearchesResponseWithAnalytics """ - searches: List[TopSearchWithAnalytics] = Field(alias="searches") + searches: List[TopSearchWithAnalytics] """ Most popular searches and their associated click and conversion metrics. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class TopSearchesResponseWithAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/analytics/models/top_searches_response_with_revenue_analytics.py b/algoliasearch/analytics/models/top_searches_response_with_revenue_analytics.py index 30db54843..35b712c72 100644 --- a/algoliasearch/analytics/models/top_searches_response_with_revenue_analytics.py +++ b/algoliasearch/analytics/models/top_searches_response_with_revenue_analytics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,13 +22,21 @@ TopSearchWithRevenueAnalytics, ) +_ALIASES = { + "searches": "searches", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TopSearchesResponseWithRevenueAnalytics(BaseModel): """ TopSearchesResponseWithRevenueAnalytics """ - searches: List[TopSearchWithRevenueAnalytics] = Field(alias="searches") + searches: List[TopSearchWithRevenueAnalytics] """ Most popular searches, including their click and revenue metrics. 
""" model_config = ConfigDict( @@ -36,6 +44,7 @@ class TopSearchesResponseWithRevenueAnalytics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/client.py b/algoliasearch/ingestion/client.py index f79cfac5e..563f8e405 100644 --- a/algoliasearch/ingestion/client.py +++ b/algoliasearch/ingestion/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Union from urllib.parse import quote from warnings import warn @@ -21,8 +21,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -140,19 +141,21 @@ class IngestionClient: """ _transporter: Transporter - _config: IngestionConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[Transporter] = None, config: Optional[IngestionConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = IngestionConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = IngestionConfig(app_id, api_key, region) @@ -204,11 +207,11 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - 
self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def create_authentication_with_http_info( self, - authentication_create: AuthenticationCreate, + authentication_create: Union[AuthenticationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -238,7 +241,7 @@ async def create_authentication_with_http_info( verb=Verb.POST, path="/1/authentications", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -246,7 +249,7 @@ async def create_authentication_with_http_info( async def create_authentication( self, - authentication_create: AuthenticationCreate, + authentication_create: Union[AuthenticationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> AuthenticationCreateResponse: """ @@ -269,7 +272,7 @@ async def create_authentication( async def create_destination_with_http_info( self, - destination_create: DestinationCreate, + destination_create: Union[DestinationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -299,7 +302,7 @@ async def create_destination_with_http_info( verb=Verb.POST, path="/1/destinations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -307,7 +310,7 @@ async def create_destination_with_http_info( async def create_destination( self, - destination_create: DestinationCreate, + destination_create: Union[DestinationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> DestinationCreateResponse: """ @@ -330,7 +333,7 @@ async def create_destination( async def create_source_with_http_info( self, - 
source_create: SourceCreate, + source_create: Union[SourceCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -360,7 +363,7 @@ async def create_source_with_http_info( verb=Verb.POST, path="/1/sources", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -368,7 +371,7 @@ async def create_source_with_http_info( async def create_source( self, - source_create: SourceCreate, + source_create: Union[SourceCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceCreateResponse: """ @@ -389,8 +392,11 @@ async def create_source( async def create_task_with_http_info( self, - task_create: Annotated[ - TaskCreate, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreate, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -417,7 +423,7 @@ async def create_task_with_http_info( verb=Verb.POST, path="/2/tasks", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -425,8 +431,11 @@ async def create_task_with_http_info( async def create_task( self, - task_create: Annotated[ - TaskCreate, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreate, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskCreateResponse: @@ -444,8 +453,11 @@ async def create_task( async def create_task_v1_with_http_info( self, - task_create: Annotated[ - TaskCreateV1, Field(description="Request body for creating a task.") + 
task_create: Union[ + Annotated[ + TaskCreateV1, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -472,7 +484,7 @@ async def create_task_v1_with_http_info( verb=Verb.POST, path="/1/tasks", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -480,8 +492,11 @@ async def create_task_v1_with_http_info( async def create_task_v1( self, - task_create: Annotated[ - TaskCreateV1, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreateV1, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskCreateResponse: @@ -499,9 +514,12 @@ async def create_task_v1( async def create_transformation_with_http_info( self, - transformation_create: Annotated[ - TransformationCreate, - Field(description="Request body for creating a transformation."), + transformation_create: Union[ + Annotated[ + TransformationCreate, + Field(description="Request body for creating a transformation."), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -528,7 +546,7 @@ async def create_transformation_with_http_info( verb=Verb.POST, path="/1/transformations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -536,9 +554,12 @@ async def create_transformation_with_http_info( async def create_transformation( self, - transformation_create: Annotated[ - TransformationCreate, - Field(description="Request body for creating a transformation."), + transformation_create: Union[ + Annotated[ + TransformationCreate, + 
Field(description="Request body for creating a transformation."), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationCreateResponse: @@ -587,11 +608,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -662,11 +683,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -741,11 +762,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -756,7 +777,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -833,11 +854,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: 
List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -848,7 +869,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1979,19 +2000,32 @@ async def list_authentications_with_http_info( Optional[List[AuthenticationType]], Field(description="Type of authentication resource to retrieve."), ] = None, - platform: Annotated[ - Optional[List[PlatformWithNone]], - Field( - description="Ecommerce platform for which to retrieve authentications." - ), - ] = None, - sort: Annotated[ - Optional[AuthenticationSortKeys], - Field(description="Property by which to sort the list of authentications."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + platform: Union[ + Annotated[ + Optional[List[PlatformWithNone]], + Field( + description="Ecommerce platform for which to retrieve authentications." + ), + ], + list[dict[str, Any]], + ] = None, + sort: Union[ + Annotated[ + Optional[AuthenticationSortKeys], + Field( + description="Property by which to sort the list of authentications." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2019,20 +2053,20 @@ async def list_authentications_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if platform is not None: - _query_parameters.append(("platform", platform)) + _query_parameters["platform"] = platform if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -2058,19 +2092,32 @@ async def list_authentications( Optional[List[AuthenticationType]], Field(description="Type of authentication resource to retrieve."), ] = None, - platform: Annotated[ - Optional[List[PlatformWithNone]], - Field( - description="Ecommerce platform for which to retrieve authentications." - ), - ] = None, - sort: Annotated[ - Optional[AuthenticationSortKeys], - Field(description="Property by which to sort the list of authentications."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + platform: Union[ + Annotated[ + Optional[List[PlatformWithNone]], + Field( + description="Ecommerce platform for which to retrieve authentications." + ), + ], + list[dict[str, Any]], + ] = None, + sort: Union[ + Annotated[ + Optional[AuthenticationSortKeys], + Field( + description="Property by which to sort the list of authentications." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListAuthenticationsResponse: @@ -2123,13 +2170,21 @@ async def list_destinations_with_http_info( Optional[StrictStr], Field(description="Get the list of destinations used by a transformation."), ] = None, - sort: Annotated[ - Optional[DestinationSortKeys], - Field(description="Property by which to sort the destinations."), + sort: Union[ + Annotated[ + Optional[DestinationSortKeys], + Field(description="Property by which to sort the destinations."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2159,22 +2214,22 @@ async def list_destinations_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if authentication_id is not None: - _query_parameters.append(("authenticationID", authentication_id)) + _query_parameters["authenticationID"] = authentication_id if transformation_id is not None: - _query_parameters.append(("transformationID", transformation_id)) + _query_parameters["transformationID"] = transformation_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -2207,13 +2262,21 @@ async def list_destinations( Optional[StrictStr], Field(description="Get the list of destinations used by a transformation."), ] = None, - sort: Annotated[ - Optional[DestinationSortKeys], - Field(description="Property by which to sort the destinations."), + sort: Union[ + Annotated[ + Optional[DestinationSortKeys], + Field(description="Property by which to sort the destinations."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListDestinationsResponse: @@ -2275,13 +2338,23 @@ async def list_events_with_http_info( Optional[List[EventType]], Field(description="Event type for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[EventSortKeys], - Field(description="Property by which to sort the list of task run events."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[EventSortKeys], + Field( + description="Property by which to sort the list of task run events." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -2332,24 +2405,24 @@ async def list_events_with_http_info( "Parameter `run_id` is required when calling `list_events`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if status is not None: - _query_parameters.append(("status", status)) + _query_parameters["status"] = status if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date return await self._transporter.request( verb=Verb.GET, @@ -2384,13 +2457,23 @@ async def list_events( Optional[List[EventType]], Field(description="Event type for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[EventSortKeys], - Field(description="Property by which to sort the list of task run events."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[EventSortKeys], + Field( + description="Property by which to sort the list of task run events." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -2471,13 +2554,21 @@ async def list_runs_with_http_info( Optional[StrictStr], Field(description="Task ID for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[RunSortKeys], - Field(description="Property by which to sort the list of task runs."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[RunSortKeys], + Field(description="Property by which to sort the list of task runs."), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -2523,26 +2614,26 @@ async def list_runs_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if status is not None: - _query_parameters.append(("status", status)) + _query_parameters["status"] = status if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if task_id is not None: - _query_parameters.append(("taskID", task_id)) + _query_parameters["taskID"] = task_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - 
_query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date return await self._transporter.request( verb=Verb.GET, @@ -2576,13 +2667,21 @@ async def list_runs( Optional[StrictStr], Field(description="Task ID for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[RunSortKeys], - Field(description="Property by which to sort the list of task runs."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[RunSortKeys], + Field(description="Property by which to sort the list of task runs."), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -2661,13 +2760,21 @@ async def list_sources_with_http_info( description="Authentication IDs of the sources to retrieve. 'none' returns sources that doesn't have an authentication. " ), ] = None, - sort: Annotated[ - Optional[SourceSortKeys], - Field(description="Property by which to sort the list of sources."), + sort: Union[ + Annotated[ + Optional[SourceSortKeys], + Field(description="Property by which to sort the list of sources."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2695,20 +2802,20 @@ async def list_sources_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if authentication_id is not None: - _query_parameters.append(("authenticationID", authentication_id)) + _query_parameters["authenticationID"] = authentication_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -2740,13 +2847,21 @@ async def list_sources( description="Authentication IDs of the sources to retrieve. 'none' returns sources that doesn't have an authentication. " ), ] = None, - sort: Annotated[ - Optional[SourceSortKeys], - Field(description="Property by which to sort the list of sources."), + sort: Union[ + Annotated[ + Optional[SourceSortKeys], + Field(description="Property by which to sort the list of sources."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListSourcesResponse: @@ -2810,13 +2925,21 @@ async def list_tasks_with_http_info( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2850,26 +2973,26 @@ async def list_tasks_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if action is not None: - _query_parameters.append(("action", action)) + _query_parameters["action"] = action if enabled is not None: - _query_parameters.append(("enabled", enabled)) + _query_parameters["enabled"] = enabled if source_id is not None: - _query_parameters.append(("sourceID", source_id)) + _query_parameters["sourceID"] = source_id if destination_id is not None: - _query_parameters.append(("destinationID", destination_id)) + _query_parameters["destinationID"] = destination_id if trigger_type is not None: - _query_parameters.append(("triggerType", trigger_type)) + _query_parameters["triggerType"] = trigger_type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -2913,13 +3036,21 @@ async def list_tasks( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTasksResponse: @@ -2998,13 +3129,21 @@ async def list_tasks_v1_with_http_info( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -3038,26 +3177,26 @@ async def list_tasks_v1_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if action is not None: - _query_parameters.append(("action", action)) + _query_parameters["action"] = action if enabled is not None: - _query_parameters.append(("enabled", enabled)) + _query_parameters["enabled"] = enabled if source_id is not None: - _query_parameters.append(("sourceID", source_id)) + _query_parameters["sourceID"] = source_id if destination_id is not None: - _query_parameters.append(("destinationID", destination_id)) + _query_parameters["destinationID"] = destination_id if trigger_type is not None: - _query_parameters.append(("triggerType", trigger_type)) + _query_parameters["triggerType"] = trigger_type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -3101,13 +3240,21 @@ async def list_tasks_v1( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTasksResponseV1: @@ -3164,13 +3311,23 @@ async def list_transformations_with_http_info( Optional[Annotated[int, Field(strict=True, ge=1)]], Field(description="Page number of the paginated API response."), ] = None, - sort: Annotated[ - Optional[TransformationSortKeys], - Field(description="Property by which to sort the list of transformations."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[TransformationSortKeys], + Field( + description="Property by which to sort the list of transformations." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -3194,16 +3351,16 @@ async def list_transformations_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return await self._transporter.request( verb=Verb.GET, @@ -3225,13 +3382,23 @@ async def list_transformations( Optional[Annotated[int, Field(strict=True, ge=1)]], Field(description="Page number of the paginated API response."), ] = None, - sort: Annotated[ - Optional[TransformationSortKeys], - Field(description="Property by which to sort the list of transformations."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[TransformationSortKeys], + Field( + description="Property by which to sort the list of transformations." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTransformationsResponse: @@ -3264,11 +3431,14 @@ async def push_task_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - push_task_payload: Annotated[ - PushTaskPayload, - Field( - description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." - ), + push_task_payload: Union[ + Annotated[ + PushTaskPayload, + Field( + description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." 
+ ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -3308,7 +3478,7 @@ async def push_task_with_http_info( "{taskID}", quote(str(task_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3319,11 +3489,14 @@ async def push_task( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - push_task_payload: Annotated[ - PushTaskPayload, - Field( - description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." - ), + push_task_payload: Union[ + Annotated[ + PushTaskPayload, + Field( + description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." + ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> RunResponse: @@ -3352,7 +3525,7 @@ async def run_source_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - run_source_payload: Optional[RunSourcePayload] = None, + run_source_payload: Union[Optional[RunSourcePayload], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3386,7 +3559,7 @@ async def run_source_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3397,7 +3570,7 @@ async def run_source( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - run_source_payload: Optional[RunSourcePayload] = None, + run_source_payload: Union[Optional[RunSourcePayload], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = 
None, ) -> RunSourceResponse: """ @@ -3540,7 +3713,7 @@ async def run_task_v1( async def search_authentications_with_http_info( self, - authentication_search: AuthenticationSearch, + authentication_search: Union[AuthenticationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3570,7 +3743,7 @@ async def search_authentications_with_http_info( verb=Verb.POST, path="/1/authentications/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3578,7 +3751,7 @@ async def search_authentications_with_http_info( async def search_authentications( self, - authentication_search: AuthenticationSearch, + authentication_search: Union[AuthenticationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Authentication]: """ @@ -3601,7 +3774,7 @@ async def search_authentications( async def search_destinations_with_http_info( self, - destination_search: DestinationSearch, + destination_search: Union[DestinationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3631,7 +3804,7 @@ async def search_destinations_with_http_info( verb=Verb.POST, path="/1/destinations/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3639,7 +3812,7 @@ async def search_destinations_with_http_info( async def search_destinations( self, - destination_search: DestinationSearch, + destination_search: Union[DestinationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Destination]: """ @@ -3662,7 +3835,7 @@ async def search_destinations( async def search_sources_with_http_info( self, - source_search: 
SourceSearch, + source_search: Union[SourceSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3692,7 +3865,7 @@ async def search_sources_with_http_info( verb=Verb.POST, path="/1/sources/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3700,7 +3873,7 @@ async def search_sources_with_http_info( async def search_sources( self, - source_search: SourceSearch, + source_search: Union[SourceSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Source]: """ @@ -3721,7 +3894,7 @@ async def search_sources( async def search_tasks_with_http_info( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3751,7 +3924,7 @@ async def search_tasks_with_http_info( verb=Verb.POST, path="/2/tasks/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3759,7 +3932,7 @@ async def search_tasks_with_http_info( async def search_tasks( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Task]: """ @@ -3780,7 +3953,7 @@ async def search_tasks( async def search_tasks_v1_with_http_info( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3810,7 +3983,7 @@ async def search_tasks_v1_with_http_info( verb=Verb.POST, path="/1/tasks/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + 
data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3818,7 +3991,7 @@ async def search_tasks_v1_with_http_info( async def search_tasks_v1( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[TaskV1]: """ @@ -3839,7 +4012,7 @@ async def search_tasks_v1( async def search_transformations_with_http_info( self, - transformation_search: TransformationSearch, + transformation_search: Union[TransformationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3869,7 +4042,7 @@ async def search_transformations_with_http_info( verb=Verb.POST, path="/1/transformations/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3877,7 +4050,7 @@ async def search_transformations_with_http_info( async def search_transformations( self, - transformation_search: TransformationSearch, + transformation_search: Union[TransformationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Transformation]: """ @@ -3962,7 +4135,7 @@ async def trigger_docker_source_discover( async def try_transformation_with_http_info( self, - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3992,7 +4165,7 @@ async def try_transformation_with_http_info( verb=Verb.POST, path="/1/transformations/try", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4000,7 +4173,7 @@ async def try_transformation_with_http_info( async def 
try_transformation( self, - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationTryResponse: """ @@ -4026,7 +4199,7 @@ async def try_transformation_before_update_with_http_info( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4065,7 +4238,7 @@ async def try_transformation_before_update_with_http_info( "{transformationID}", quote(str(transformation_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4076,7 +4249,7 @@ async def try_transformation_before_update( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationTryResponse: """ @@ -4105,7 +4278,7 @@ async def update_authentication_with_http_info( StrictStr, Field(description="Unique identifier of an authentication resource."), ], - authentication_update: AuthenticationUpdate, + authentication_update: Union[AuthenticationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4144,7 +4317,7 @@ async def update_authentication_with_http_info( "{authenticationID}", quote(str(authentication_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), 
use_read_transporter=False, @@ -4156,7 +4329,7 @@ async def update_authentication( StrictStr, Field(description="Unique identifier of an authentication resource."), ], - authentication_update: AuthenticationUpdate, + authentication_update: Union[AuthenticationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> AuthenticationUpdateResponse: """ @@ -4184,7 +4357,7 @@ async def update_destination_with_http_info( destination_id: Annotated[ StrictStr, Field(description="Unique identifier of a destination.") ], - destination_update: DestinationUpdate, + destination_update: Union[DestinationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4223,7 +4396,7 @@ async def update_destination_with_http_info( "{destinationID}", quote(str(destination_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4234,7 +4407,7 @@ async def update_destination( destination_id: Annotated[ StrictStr, Field(description="Unique identifier of a destination.") ], - destination_update: DestinationUpdate, + destination_update: Union[DestinationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> DestinationUpdateResponse: """ @@ -4262,7 +4435,7 @@ async def update_source_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4301,7 +4474,7 @@ async def update_source_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), 
user_request_options=request_options, ), use_read_transporter=False, @@ -4312,7 +4485,7 @@ async def update_source( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceUpdateResponse: """ @@ -4340,7 +4513,7 @@ async def update_task_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdate, + task_update: Union[TaskUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4373,7 +4546,7 @@ async def update_task_with_http_info( verb=Verb.PATCH, path="/2/tasks/{taskID}".replace("{taskID}", quote(str(task_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4384,7 +4557,7 @@ async def update_task( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdate, + task_update: Union[TaskUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskUpdateResponse: """ @@ -4408,7 +4581,7 @@ async def update_task_v1_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdateV1, + task_update: Union[TaskUpdateV1, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4441,7 +4614,7 @@ async def update_task_v1_with_http_info( verb=Verb.PATCH, path="/1/tasks/{taskID}".replace("{taskID}", quote(str(task_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ 
-4452,7 +4625,7 @@ async def update_task_v1( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdateV1, + task_update: Union[TaskUpdateV1, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskUpdateResponse: """ @@ -4476,7 +4649,7 @@ async def update_transformation_with_http_info( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_create: TransformationCreate, + transformation_create: Union[TransformationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4511,7 +4684,7 @@ async def update_transformation_with_http_info( "{transformationID}", quote(str(transformation_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4522,7 +4695,7 @@ async def update_transformation( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_create: TransformationCreate, + transformation_create: Union[TransformationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationUpdateResponse: """ @@ -4543,7 +4716,7 @@ async def update_transformation( async def validate_source_with_http_info( self, - source_create: Optional[SourceCreate] = None, + source_create: Union[Optional[SourceCreate], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4568,7 +4741,7 @@ async def validate_source_with_http_info( verb=Verb.POST, path="/1/sources/validate", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ 
-4576,7 +4749,7 @@ async def validate_source_with_http_info( async def validate_source( self, - source_create: Optional[SourceCreate] = None, + source_create: Union[Optional[SourceCreate], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceWatchResponse: """ @@ -4600,7 +4773,7 @@ async def validate_source_before_update_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4639,7 +4812,7 @@ async def validate_source_before_update_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4650,7 +4823,7 @@ async def validate_source_before_update( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceWatchResponse: """ @@ -4693,19 +4866,21 @@ class IngestionClientSync: """ _transporter: TransporterSync - _config: IngestionConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[TransporterSync] = None, config: Optional[IngestionConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = IngestionConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = IngestionConfig(app_id, api_key, region) @@ -4756,11 +4931,11 @@ def close(self) -> None: def 
set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def create_authentication_with_http_info( self, - authentication_create: AuthenticationCreate, + authentication_create: Union[AuthenticationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4790,7 +4965,7 @@ def create_authentication_with_http_info( verb=Verb.POST, path="/1/authentications", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4798,7 +4973,7 @@ def create_authentication_with_http_info( def create_authentication( self, - authentication_create: AuthenticationCreate, + authentication_create: Union[AuthenticationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> AuthenticationCreateResponse: """ @@ -4821,7 +4996,7 @@ def create_authentication( def create_destination_with_http_info( self, - destination_create: DestinationCreate, + destination_create: Union[DestinationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4851,7 +5026,7 @@ def create_destination_with_http_info( verb=Verb.POST, path="/1/destinations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4859,7 +5034,7 @@ def create_destination_with_http_info( def create_destination( self, - destination_create: DestinationCreate, + destination_create: Union[DestinationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> DestinationCreateResponse: """ @@ -4882,7 +5057,7 @@ def create_destination( 
def create_source_with_http_info( self, - source_create: SourceCreate, + source_create: Union[SourceCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4912,7 +5087,7 @@ def create_source_with_http_info( verb=Verb.POST, path="/1/sources", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4920,7 +5095,7 @@ def create_source_with_http_info( def create_source( self, - source_create: SourceCreate, + source_create: Union[SourceCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceCreateResponse: """ @@ -4941,8 +5116,11 @@ def create_source( def create_task_with_http_info( self, - task_create: Annotated[ - TaskCreate, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreate, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -4969,7 +5147,7 @@ def create_task_with_http_info( verb=Verb.POST, path="/2/tasks", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4977,8 +5155,11 @@ def create_task_with_http_info( def create_task( self, - task_create: Annotated[ - TaskCreate, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreate, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskCreateResponse: @@ -4996,8 +5177,11 @@ def create_task( def create_task_v1_with_http_info( self, - task_create: Annotated[ - TaskCreateV1, Field(description="Request body for creating a task.") + 
task_create: Union[ + Annotated[ + TaskCreateV1, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -5024,7 +5208,7 @@ def create_task_v1_with_http_info( verb=Verb.POST, path="/1/tasks", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5032,8 +5216,11 @@ def create_task_v1_with_http_info( def create_task_v1( self, - task_create: Annotated[ - TaskCreateV1, Field(description="Request body for creating a task.") + task_create: Union[ + Annotated[ + TaskCreateV1, Field(description="Request body for creating a task.") + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskCreateResponse: @@ -5051,9 +5238,12 @@ def create_task_v1( def create_transformation_with_http_info( self, - transformation_create: Annotated[ - TransformationCreate, - Field(description="Request body for creating a transformation."), + transformation_create: Union[ + Annotated[ + TransformationCreate, + Field(description="Request body for creating a transformation."), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -5080,7 +5270,7 @@ def create_transformation_with_http_info( verb=Verb.POST, path="/1/transformations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5088,9 +5278,12 @@ def create_transformation_with_http_info( def create_transformation( self, - transformation_create: Annotated[ - TransformationCreate, - Field(description="Request body for creating a transformation."), + transformation_create: Union[ + Annotated[ + TransformationCreate, + Field(description="Request body for 
creating a transformation."), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationCreateResponse: @@ -5139,11 +5332,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -5212,11 +5405,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -5291,11 +5484,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -5306,7 +5499,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5381,11 +5574,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} 
if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -5396,7 +5589,7 @@ def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6521,19 +6714,32 @@ def list_authentications_with_http_info( Optional[List[AuthenticationType]], Field(description="Type of authentication resource to retrieve."), ] = None, - platform: Annotated[ - Optional[List[PlatformWithNone]], - Field( - description="Ecommerce platform for which to retrieve authentications." - ), - ] = None, - sort: Annotated[ - Optional[AuthenticationSortKeys], - Field(description="Property by which to sort the list of authentications."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + platform: Union[ + Annotated[ + Optional[List[PlatformWithNone]], + Field( + description="Ecommerce platform for which to retrieve authentications." + ), + ], + list[dict[str, Any]], + ] = None, + sort: Union[ + Annotated[ + Optional[AuthenticationSortKeys], + Field( + description="Property by which to sort the list of authentications." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -6561,20 +6767,20 @@ def list_authentications_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if platform is not None: - _query_parameters.append(("platform", platform)) + _query_parameters["platform"] = platform if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -6600,19 +6806,32 @@ def list_authentications( Optional[List[AuthenticationType]], Field(description="Type of authentication resource to retrieve."), ] = None, - platform: Annotated[ - Optional[List[PlatformWithNone]], - Field( - description="Ecommerce platform for which to retrieve authentications." - ), - ] = None, - sort: Annotated[ - Optional[AuthenticationSortKeys], - Field(description="Property by which to sort the list of authentications."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + platform: Union[ + Annotated[ + Optional[List[PlatformWithNone]], + Field( + description="Ecommerce platform for which to retrieve authentications." + ), + ], + list[dict[str, Any]], + ] = None, + sort: Union[ + Annotated[ + Optional[AuthenticationSortKeys], + Field( + description="Property by which to sort the list of authentications." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListAuthenticationsResponse: @@ -6665,13 +6884,21 @@ def list_destinations_with_http_info( Optional[StrictStr], Field(description="Get the list of destinations used by a transformation."), ] = None, - sort: Annotated[ - Optional[DestinationSortKeys], - Field(description="Property by which to sort the destinations."), + sort: Union[ + Annotated[ + Optional[DestinationSortKeys], + Field(description="Property by which to sort the destinations."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -6701,22 +6928,22 @@ def list_destinations_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if authentication_id is not None: - _query_parameters.append(("authenticationID", authentication_id)) + _query_parameters["authenticationID"] = authentication_id if transformation_id is not None: - _query_parameters.append(("transformationID", transformation_id)) + _query_parameters["transformationID"] = transformation_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -6749,13 +6976,21 @@ def list_destinations( Optional[StrictStr], Field(description="Get the list of destinations used by a transformation."), ] = None, - sort: Annotated[ - Optional[DestinationSortKeys], - Field(description="Property by which to sort the destinations."), + sort: Union[ + Annotated[ + Optional[DestinationSortKeys], + Field(description="Property by which to sort the destinations."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListDestinationsResponse: @@ -6817,13 +7052,23 @@ def list_events_with_http_info( Optional[List[EventType]], Field(description="Event type for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[EventSortKeys], - Field(description="Property by which to sort the list of task run events."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[EventSortKeys], + Field( + description="Property by which to sort the list of task run events." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -6874,24 +7119,24 @@ def list_events_with_http_info( "Parameter `run_id` is required when calling `list_events`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if status is not None: - _query_parameters.append(("status", status)) + _query_parameters["status"] = status if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) + _query_parameters["endDate"] = end_date return self._transporter.request( verb=Verb.GET, @@ -6926,13 +7171,23 @@ def list_events( Optional[List[EventType]], Field(description="Event type for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[EventSortKeys], - Field(description="Property by which to sort the list of task run events."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[EventSortKeys], + Field( + description="Property by which to sort the list of task run events." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -7013,13 +7268,21 @@ def list_runs_with_http_info( Optional[StrictStr], Field(description="Task ID for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[RunSortKeys], - Field(description="Property by which to sort the list of task runs."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[RunSortKeys], + Field(description="Property by which to sort the list of task runs."), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -7065,26 +7328,26 @@ def list_runs_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if status is not None: - _query_parameters.append(("status", status)) + _query_parameters["status"] = status if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if task_id is not None: - _query_parameters.append(("taskID", task_id)) + _query_parameters["taskID"] = task_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order if start_date is not None: - _query_parameters.append(("startDate", start_date)) + _query_parameters["startDate"] = start_date if end_date is not None: - _query_parameters.append(("endDate", end_date)) 
+ _query_parameters["endDate"] = end_date return self._transporter.request( verb=Verb.GET, @@ -7118,13 +7381,21 @@ def list_runs( Optional[StrictStr], Field(description="Task ID for filtering the list of task runs."), ] = None, - sort: Annotated[ - Optional[RunSortKeys], - Field(description="Property by which to sort the list of task runs."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[RunSortKeys], + Field(description="Property by which to sort the list of task runs."), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, start_date: Annotated[ Optional[StrictStr], @@ -7203,13 +7474,21 @@ def list_sources_with_http_info( description="Authentication IDs of the sources to retrieve. 'none' returns sources that doesn't have an authentication. " ), ] = None, - sort: Annotated[ - Optional[SourceSortKeys], - Field(description="Property by which to sort the list of sources."), + sort: Union[ + Annotated[ + Optional[SourceSortKeys], + Field(description="Property by which to sort the list of sources."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7237,20 +7516,20 @@ def list_sources_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type if authentication_id is not None: - _query_parameters.append(("authenticationID", authentication_id)) + _query_parameters["authenticationID"] = authentication_id if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -7282,13 +7561,21 @@ def list_sources( description="Authentication IDs of the sources to retrieve. 'none' returns sources that doesn't have an authentication. " ), ] = None, - sort: Annotated[ - Optional[SourceSortKeys], - Field(description="Property by which to sort the list of sources."), + sort: Union[ + Annotated[ + Optional[SourceSortKeys], + Field(description="Property by which to sort the list of sources."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListSourcesResponse: @@ -7352,13 +7639,21 @@ def list_tasks_with_http_info( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7392,26 +7687,26 @@ def list_tasks_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if action is not None: - _query_parameters.append(("action", action)) + _query_parameters["action"] = action if enabled is not None: - _query_parameters.append(("enabled", enabled)) + _query_parameters["enabled"] = enabled if source_id is not None: - _query_parameters.append(("sourceID", source_id)) + _query_parameters["sourceID"] = source_id if destination_id is not None: - _query_parameters.append(("destinationID", destination_id)) + _query_parameters["destinationID"] = destination_id if trigger_type is not None: - _query_parameters.append(("triggerType", trigger_type)) + _query_parameters["triggerType"] = trigger_type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -7455,13 +7750,21 @@ def list_tasks( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTasksResponse: @@ -7540,13 +7843,21 @@ def list_tasks_v1_with_http_info( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7580,26 +7891,26 @@ def list_tasks_v1_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if action is not None: - _query_parameters.append(("action", action)) + _query_parameters["action"] = action if enabled is not None: - _query_parameters.append(("enabled", enabled)) + _query_parameters["enabled"] = enabled if source_id is not None: - _query_parameters.append(("sourceID", source_id)) + _query_parameters["sourceID"] = source_id if destination_id is not None: - _query_parameters.append(("destinationID", destination_id)) + _query_parameters["destinationID"] = destination_id if trigger_type is not None: - _query_parameters.append(("triggerType", trigger_type)) + _query_parameters["triggerType"] = trigger_type if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -7643,13 +7954,21 @@ def list_tasks_v1( Optional[List[TriggerType]], Field(description="Type of task trigger for filtering the list of tasks."), ] = None, - sort: Annotated[ - Optional[TaskSortKeys], - Field(description="Property by which to sort the list of tasks."), + sort: Union[ + Annotated[ + Optional[TaskSortKeys], + Field(description="Property by which to sort the list of tasks."), + ], + str, ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." 
+ ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTasksResponseV1: @@ -7706,13 +8025,23 @@ def list_transformations_with_http_info( Optional[Annotated[int, Field(strict=True, ge=1)]], Field(description="Page number of the paginated API response."), ] = None, - sort: Annotated[ - Optional[TransformationSortKeys], - Field(description="Property by which to sort the list of transformations."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[TransformationSortKeys], + Field( + description="Property by which to sort the list of transformations." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7736,16 +8065,16 @@ def list_transformations_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if items_per_page is not None: - _query_parameters.append(("itemsPerPage", items_per_page)) + _query_parameters["itemsPerPage"] = items_per_page if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if sort is not None: - _query_parameters.append(("sort", sort)) + _query_parameters["sort"] = sort if order is not None: - _query_parameters.append(("order", order)) + _query_parameters["order"] = order return self._transporter.request( verb=Verb.GET, @@ -7767,13 +8096,23 @@ def list_transformations( Optional[Annotated[int, Field(strict=True, ge=1)]], Field(description="Page number of the paginated API response."), ] = None, - sort: Annotated[ - Optional[TransformationSortKeys], - Field(description="Property by which to sort the list of transformations."), - ] = None, - order: Annotated[ - Optional[OrderKeys], - Field(description="Sort order of the response, ascending or descending."), + sort: Union[ + Annotated[ + Optional[TransformationSortKeys], + Field( + description="Property by which to sort the list of transformations." + ), + ], + str, + ] = None, + order: Union[ + Annotated[ + Optional[OrderKeys], + Field( + description="Sort order of the response, ascending or descending." + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ListTransformationsResponse: @@ -7806,11 +8145,14 @@ def push_task_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - push_task_payload: Annotated[ - PushTaskPayload, - Field( - description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." - ), + push_task_payload: Union[ + Annotated[ + PushTaskPayload, + Field( + description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." 
+ ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7850,7 +8192,7 @@ def push_task_with_http_info( "{taskID}", quote(str(task_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -7861,11 +8203,14 @@ def push_task( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - push_task_payload: Annotated[ - PushTaskPayload, - Field( - description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." - ), + push_task_payload: Union[ + Annotated[ + PushTaskPayload, + Field( + description="Request body of a Search API `batch` request that will be pushed in the Connectors pipeline." + ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> RunResponse: @@ -7894,7 +8239,7 @@ def run_source_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - run_source_payload: Optional[RunSourcePayload] = None, + run_source_payload: Union[Optional[RunSourcePayload], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -7928,7 +8273,7 @@ def run_source_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -7939,7 +8284,7 @@ def run_source( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - run_source_payload: Optional[RunSourcePayload] = None, + run_source_payload: Union[Optional[RunSourcePayload], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> RunSourceResponse: 
""" @@ -8082,7 +8427,7 @@ def run_task_v1( def search_authentications_with_http_info( self, - authentication_search: AuthenticationSearch, + authentication_search: Union[AuthenticationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8112,7 +8457,7 @@ def search_authentications_with_http_info( verb=Verb.POST, path="/1/authentications/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8120,7 +8465,7 @@ def search_authentications_with_http_info( def search_authentications( self, - authentication_search: AuthenticationSearch, + authentication_search: Union[AuthenticationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Authentication]: """ @@ -8143,7 +8488,7 @@ def search_authentications( def search_destinations_with_http_info( self, - destination_search: DestinationSearch, + destination_search: Union[DestinationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8173,7 +8518,7 @@ def search_destinations_with_http_info( verb=Verb.POST, path="/1/destinations/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8181,7 +8526,7 @@ def search_destinations_with_http_info( def search_destinations( self, - destination_search: DestinationSearch, + destination_search: Union[DestinationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Destination]: """ @@ -8204,7 +8549,7 @@ def search_destinations( def search_sources_with_http_info( self, - source_search: SourceSearch, + source_search: Union[SourceSearch, dict[str, Any]], request_options: 
Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8234,7 +8579,7 @@ def search_sources_with_http_info( verb=Verb.POST, path="/1/sources/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8242,7 +8587,7 @@ def search_sources_with_http_info( def search_sources( self, - source_search: SourceSearch, + source_search: Union[SourceSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Source]: """ @@ -8263,7 +8608,7 @@ def search_sources( def search_tasks_with_http_info( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8293,7 +8638,7 @@ def search_tasks_with_http_info( verb=Verb.POST, path="/2/tasks/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8301,7 +8646,7 @@ def search_tasks_with_http_info( def search_tasks( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Task]: """ @@ -8322,7 +8667,7 @@ def search_tasks( def search_tasks_v1_with_http_info( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8352,7 +8697,7 @@ def search_tasks_v1_with_http_info( verb=Verb.POST, path="/1/tasks/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8360,7 +8705,7 @@ def search_tasks_v1_with_http_info( def 
search_tasks_v1( self, - task_search: TaskSearch, + task_search: Union[TaskSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[TaskV1]: """ @@ -8381,7 +8726,7 @@ def search_tasks_v1( def search_transformations_with_http_info( self, - transformation_search: TransformationSearch, + transformation_search: Union[TransformationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8411,7 +8756,7 @@ def search_transformations_with_http_info( verb=Verb.POST, path="/1/transformations/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8419,7 +8764,7 @@ def search_transformations_with_http_info( def search_transformations( self, - transformation_search: TransformationSearch, + transformation_search: Union[TransformationSearch, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[Transformation]: """ @@ -8504,7 +8849,7 @@ def trigger_docker_source_discover( def try_transformation_with_http_info( self, - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8534,7 +8879,7 @@ def try_transformation_with_http_info( verb=Verb.POST, path="/1/transformations/try", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8542,7 +8887,7 @@ def try_transformation_with_http_info( def try_transformation( self, - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationTryResponse: """ @@ -8568,7 
+8913,7 @@ def try_transformation_before_update_with_http_info( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8607,7 +8952,7 @@ def try_transformation_before_update_with_http_info( "{transformationID}", quote(str(transformation_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8618,7 +8963,7 @@ def try_transformation_before_update( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_try: TransformationTry, + transformation_try: Union[TransformationTry, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationTryResponse: """ @@ -8647,7 +8992,7 @@ def update_authentication_with_http_info( StrictStr, Field(description="Unique identifier of an authentication resource."), ], - authentication_update: AuthenticationUpdate, + authentication_update: Union[AuthenticationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8686,7 +9031,7 @@ def update_authentication_with_http_info( "{authenticationID}", quote(str(authentication_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8698,7 +9043,7 @@ def update_authentication( StrictStr, Field(description="Unique identifier of an authentication resource."), ], - authentication_update: AuthenticationUpdate, + authentication_update: Union[AuthenticationUpdate, dict[str, Any]], request_options: 
Optional[Union[dict, RequestOptions]] = None, ) -> AuthenticationUpdateResponse: """ @@ -8726,7 +9071,7 @@ def update_destination_with_http_info( destination_id: Annotated[ StrictStr, Field(description="Unique identifier of a destination.") ], - destination_update: DestinationUpdate, + destination_update: Union[DestinationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8765,7 +9110,7 @@ def update_destination_with_http_info( "{destinationID}", quote(str(destination_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8776,7 +9121,7 @@ def update_destination( destination_id: Annotated[ StrictStr, Field(description="Unique identifier of a destination.") ], - destination_update: DestinationUpdate, + destination_update: Union[DestinationUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> DestinationUpdateResponse: """ @@ -8804,7 +9149,7 @@ def update_source_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8843,7 +9188,7 @@ def update_source_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8854,7 +9199,7 @@ def update_source( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> 
SourceUpdateResponse: """ @@ -8882,7 +9227,7 @@ def update_task_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdate, + task_update: Union[TaskUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8915,7 +9260,7 @@ def update_task_with_http_info( verb=Verb.PATCH, path="/2/tasks/{taskID}".replace("{taskID}", quote(str(task_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8926,7 +9271,7 @@ def update_task( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdate, + task_update: Union[TaskUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskUpdateResponse: """ @@ -8948,7 +9293,7 @@ def update_task_v1_with_http_info( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdateV1, + task_update: Union[TaskUpdateV1, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8981,7 +9326,7 @@ def update_task_v1_with_http_info( verb=Verb.PATCH, path="/1/tasks/{taskID}".replace("{taskID}", quote(str(task_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8992,7 +9337,7 @@ def update_task_v1( task_id: Annotated[ StrictStr, Field(description="Unique identifier of a task.") ], - task_update: TaskUpdateV1, + task_update: Union[TaskUpdateV1, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TaskUpdateResponse: """ @@ -9014,7 +9359,7 @@ def update_transformation_with_http_info( transformation_id: 
Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_create: TransformationCreate, + transformation_create: Union[TransformationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9049,7 +9394,7 @@ def update_transformation_with_http_info( "{transformationID}", quote(str(transformation_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9060,7 +9405,7 @@ def update_transformation( transformation_id: Annotated[ StrictStr, Field(description="Unique identifier of a transformation.") ], - transformation_create: TransformationCreate, + transformation_create: Union[TransformationCreate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> TransformationUpdateResponse: """ @@ -9081,7 +9426,7 @@ def update_transformation( def validate_source_with_http_info( self, - source_create: Optional[SourceCreate] = None, + source_create: Union[Optional[SourceCreate], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9106,7 +9451,7 @@ def validate_source_with_http_info( verb=Verb.POST, path="/1/sources/validate", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9114,7 +9459,7 @@ def validate_source_with_http_info( def validate_source( self, - source_create: Optional[SourceCreate] = None, + source_create: Union[Optional[SourceCreate], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceWatchResponse: """ @@ -9138,7 +9483,7 @@ def validate_source_before_update_with_http_info( source_id: Annotated[ StrictStr, Field(description="Unique 
identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9177,7 +9522,7 @@ def validate_source_before_update_with_http_info( "{sourceID}", quote(str(source_id), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9188,7 +9533,7 @@ def validate_source_before_update( source_id: Annotated[ StrictStr, Field(description="Unique identifier of a source.") ], - source_update: SourceUpdate, + source_update: Union[SourceUpdate, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SourceWatchResponse: """ diff --git a/algoliasearch/ingestion/config.py b/algoliasearch/ingestion/config.py index cfc82a533..225a31e3f 100644 --- a/algoliasearch/ingestion/config.py +++ b/algoliasearch/ingestion/config.py @@ -1,4 +1,5 @@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import Host, HostsCollection @@ -6,11 +7,16 @@ class IngestionConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: str = None) -> None: + def __init__( + self, app_id: Optional[str], api_key: Optional[str], region: str = "" + ) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Ingestion") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." 
+ self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -37,5 +43,5 @@ def __init__(self, app_id: str, api_key: str, region: str = None) -> None: ) self.hosts = HostsCollection( - [Host("data.{region}.algolia.com".replace("{region}", region))] + [Host("data.{region}.algolia.com".replace("{region}", region or ""))] ) diff --git a/algoliasearch/ingestion/models/auth_algolia.py b/algoliasearch/ingestion/models/auth_algolia.py index 5c1c0d03b..c6dfd8a68 100644 --- a/algoliasearch/ingestion/models/auth_algolia.py +++ b/algoliasearch/ingestion/models/auth_algolia.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "app_id": "appID", + "api_key": "apiKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAlgolia(BaseModel): """ Credentials for authenticating with Algolia. """ - app_id: str = Field(alias="appID") + app_id: str """ Algolia application ID. """ - api_key: str = Field(alias="apiKey") + api_key: str """ Algolia API key with the ACL: `addObject`, `deleteObject`, `settings`, `editSettings`, `listIndexes`, `deleteIndex`. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthAlgolia(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_algolia_insights.py b/algoliasearch/ingestion/models/auth_algolia_insights.py index 6b67ae1a9..7b9f126a7 100644 --- a/algoliasearch/ingestion/models/auth_algolia_insights.py +++ b/algoliasearch/ingestion/models/auth_algolia_insights.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "app_id": "appID", + "api_key": "apiKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAlgoliaInsights(BaseModel): """ Credentials for authenticating with the Algolia Insights API. """ - app_id: str = Field(alias="appID") + app_id: str """ Algolia application ID. """ - api_key: str = Field(alias="apiKey") + api_key: str """ Algolia API key with the ACL: `search`. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthAlgoliaInsights(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_algolia_insights_partial.py b/algoliasearch/ingestion/models/auth_algolia_insights_partial.py index 3c2136969..c8a5d582c 100644 --- a/algoliasearch/ingestion/models/auth_algolia_insights_partial.py +++ b/algoliasearch/ingestion/models/auth_algolia_insights_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "app_id": "appID", + "api_key": "apiKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAlgoliaInsightsPartial(BaseModel): """ Credentials for authenticating with the Algolia Insights API. """ - app_id: Optional[str] = Field(default=None, alias="appID") + app_id: Optional[str] = None """ Algolia application ID. """ - api_key: Optional[str] = Field(default=None, alias="apiKey") + api_key: Optional[str] = None """ Algolia API key with the ACL: `search`. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthAlgoliaInsightsPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_algolia_partial.py b/algoliasearch/ingestion/models/auth_algolia_partial.py index ed12a3e1a..1efb3d211 100644 --- a/algoliasearch/ingestion/models/auth_algolia_partial.py +++ b/algoliasearch/ingestion/models/auth_algolia_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "app_id": "appID", + "api_key": "apiKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAlgoliaPartial(BaseModel): """ Credentials for authenticating with Algolia. """ - app_id: Optional[str] = Field(default=None, alias="appID") + app_id: Optional[str] = None """ Algolia application ID. """ - api_key: Optional[str] = Field(default=None, alias="apiKey") + api_key: Optional[str] = None """ Algolia API key with the ACL: `addObject`, `deleteObject`, `settings`, `editSettings`, `listIndexes`, `deleteIndex`. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthAlgoliaPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_api_key.py b/algoliasearch/ingestion/models/auth_api_key.py index f36fa8df3..e46e52963 100644 --- a/algoliasearch/ingestion/models/auth_api_key.py +++ b/algoliasearch/ingestion/models/auth_api_key.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "key": "key", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAPIKey(BaseModel): """ Credentials for authenticating with an API key. """ - key: str = Field(alias="key") + key: str """ API key. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class AuthAPIKey(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_api_key_partial.py b/algoliasearch/ingestion/models/auth_api_key_partial.py index e8e4177e0..65afa7d71 100644 --- a/algoliasearch/ingestion/models/auth_api_key_partial.py +++ b/algoliasearch/ingestion/models/auth_api_key_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "key": "key", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthAPIKeyPartial(BaseModel): """ Credentials for authenticating with an API key. """ - key: Optional[str] = Field(default=None, alias="key") + key: Optional[str] = None """ API key. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class AuthAPIKeyPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_basic.py b/algoliasearch/ingestion/models/auth_basic.py index efdd3cbea..e8879e496 100644 --- a/algoliasearch/ingestion/models/auth_basic.py +++ b/algoliasearch/ingestion/models/auth_basic.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "username": "username", + "password": "password", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthBasic(BaseModel): """ Credentials for authenticating with user name and password. """ - username: str = Field(alias="username") + username: str """ Username. """ - password: str = Field(alias="password") + password: str """ Password. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthBasic(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_basic_partial.py b/algoliasearch/ingestion/models/auth_basic_partial.py index 850450c60..527e4d3ba 100644 --- a/algoliasearch/ingestion/models/auth_basic_partial.py +++ b/algoliasearch/ingestion/models/auth_basic_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "username": "username", + "password": "password", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthBasicPartial(BaseModel): """ Credentials for authenticating with user name and password. """ - username: Optional[str] = Field(default=None, alias="username") + username: Optional[str] = None """ Username. """ - password: Optional[str] = Field(default=None, alias="password") + password: Optional[str] = None """ Password. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthBasicPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_google_service_account.py b/algoliasearch/ingestion/models/auth_google_service_account.py index 2f14251b3..fbfd9381f 100644 --- a/algoliasearch/ingestion/models/auth_google_service_account.py +++ b/algoliasearch/ingestion/models/auth_google_service_account.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "client_email": "clientEmail", + "private_key": "privateKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthGoogleServiceAccount(BaseModel): """ Credentials for authenticating with a Google service account, such as BigQuery. """ - client_email: str = Field(alias="clientEmail") + client_email: str """ Email address of the Google service account. """ - private_key: str = Field(alias="privateKey") + private_key: str """ Private key of the Google service account. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthGoogleServiceAccount(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_google_service_account_partial.py b/algoliasearch/ingestion/models/auth_google_service_account_partial.py index 7f75f9818..c89f51b09 100644 --- a/algoliasearch/ingestion/models/auth_google_service_account_partial.py +++ b/algoliasearch/ingestion/models/auth_google_service_account_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "client_email": "clientEmail", + "private_key": "privateKey", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthGoogleServiceAccountPartial(BaseModel): """ Credentials for authenticating with a Google service account, such as BigQuery. """ - client_email: Optional[str] = Field(default=None, alias="clientEmail") + client_email: Optional[str] = None """ Email address of the Google service account. """ - private_key: Optional[str] = Field(default=None, alias="privateKey") + private_key: Optional[str] = None """ Private key of the Google service account. This field is `null` in the API response. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AuthGoogleServiceAccountPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_input.py b/algoliasearch/ingestion/models/auth_input.py index 63fcdffa5..74a044795 100644 --- a/algoliasearch/ingestion/models/auth_input.py +++ b/algoliasearch/ingestion/models/auth_input.py @@ -45,15 +45,14 @@ class AuthInput(BaseModel): oneof_schema_6_validator: Optional[AuthAlgoliaInsights] = Field(default=None) - actual_instance: Optional[ - Union[ - AuthAPIKey, - AuthAlgolia, - AuthAlgoliaInsights, - AuthBasic, - AuthGoogleServiceAccount, - AuthOAuth, - ] + actual_instance: Union[ + AuthAPIKey, + AuthAlgolia, + AuthAlgoliaInsights, + AuthBasic, + AuthGoogleServiceAccount, + AuthOAuth, + None, ] = None one_of_schemas: Set[str] = { "AuthAPIKey", @@ -74,22 +73,22 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - AuthAPIKey, - AuthAlgolia, - AuthAlgoliaInsights, - AuthBasic, - AuthGoogleServiceAccount, - AuthOAuth, - ] + ) -> Union[ + AuthAPIKey, + AuthAlgolia, + AuthAlgoliaInsights, + AuthBasic, + AuthGoogleServiceAccount, + AuthOAuth, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -155,9 +154,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -179,8 +178,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/auth_input_partial.py b/algoliasearch/ingestion/models/auth_input_partial.py index b792f1840..c3cafb115 100644 --- a/algoliasearch/ingestion/models/auth_input_partial.py +++ b/algoliasearch/ingestion/models/auth_input_partial.py @@ -49,15 +49,14 @@ class AuthInputPartial(BaseModel): oneof_schema_6_validator: Optional[AuthAlgoliaInsightsPartial] = Field(default=None) - actual_instance: Optional[ - Union[ - AuthAPIKeyPartial, - AuthAlgoliaInsightsPartial, - AuthAlgoliaPartial, - AuthBasicPartial, - AuthGoogleServiceAccountPartial, - AuthOAuthPartial, - ] + actual_instance: Union[ + AuthAPIKeyPartial, + AuthAlgoliaInsightsPartial, + AuthAlgoliaPartial, + AuthBasicPartial, + AuthGoogleServiceAccountPartial, + AuthOAuthPartial, + None, ] = None one_of_schemas: Set[str] = { "AuthAPIKeyPartial", @@ -78,22 +77,22 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - AuthAPIKeyPartial, - AuthAlgoliaInsightsPartial, - AuthAlgoliaPartial, - AuthBasicPartial, - AuthGoogleServiceAccountPartial, - AuthOAuthPartial, - ] + ) -> Union[ + AuthAPIKeyPartial, + AuthAlgoliaInsightsPartial, + AuthAlgoliaPartial, + AuthBasicPartial, + AuthGoogleServiceAccountPartial, + AuthOAuthPartial, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. @@ -161,9 +160,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -185,8 +184,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/auth_o_auth.py b/algoliasearch/ingestion/models/auth_o_auth.py index 990556ded..c9e7df6c8 100644 --- a/algoliasearch/ingestion/models/auth_o_auth.py +++ b/algoliasearch/ingestion/models/auth_o_auth.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "url": "url", + "client_id": "client_id", + "client_secret": "client_secret", + "scope": "scope", +} + + +def 
_alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthOAuth(BaseModel): """ Credentials for authenticating with OAuth 2.0. """ - url: str = Field(alias="url") + url: str """ URL for the OAuth endpoint. """ - client_id: str = Field(alias="client_id") + client_id: str """ Client ID. """ - client_secret: str = Field(alias="client_secret") + client_secret: str """ Client secret. This field is `null` in the API response. """ - scope: Optional[str] = Field(default=None, alias="scope") + scope: Optional[str] = None """ OAuth scope. """ model_config = ConfigDict( @@ -37,6 +49,7 @@ class AuthOAuth(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/auth_o_auth_partial.py b/algoliasearch/ingestion/models/auth_o_auth_partial.py index 75b8ddaef..56c477685 100644 --- a/algoliasearch/ingestion/models/auth_o_auth_partial.py +++ b/algoliasearch/ingestion/models/auth_o_auth_partial.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "url": "url", + "client_id": "client_id", + "client_secret": "client_secret", + "scope": "scope", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthOAuthPartial(BaseModel): """ Credentials for authenticating with OAuth 2.0. """ - url: Optional[str] = Field(default=None, alias="url") + url: Optional[str] = None """ URL for the OAuth endpoint. """ - client_id: Optional[str] = Field(default=None, alias="client_id") + client_id: Optional[str] = None """ Client ID. 
""" - client_secret: Optional[str] = Field(default=None, alias="client_secret") + client_secret: Optional[str] = None """ Client secret. This field is `null` in the API response. """ - scope: Optional[str] = Field(default=None, alias="scope") + scope: Optional[str] = None """ OAuth scope. """ model_config = ConfigDict( @@ -37,6 +49,7 @@ class AuthOAuthPartial(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication.py b/algoliasearch/ingestion/models/authentication.py index 377554d00..c636b8be5 100644 --- a/algoliasearch/ingestion/models/authentication.py +++ b/algoliasearch/ingestion/models/authentication.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,22 +22,36 @@ from algoliasearch.ingestion.models.authentication_type import AuthenticationType from algoliasearch.ingestion.models.platform import Platform +_ALIASES = { + "authentication_id": "authenticationID", + "type": "type", + "name": "name", + "platform": "platform", + "input": "input", + "created_at": "createdAt", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Authentication(BaseModel): """ Resource representing the information required to authenticate with a source or a destination. """ - authentication_id: str = Field(alias="authenticationID") + authentication_id: str """ Universally unique identifier (UUID) of an authentication resource. """ - type: AuthenticationType = Field(alias="type") - name: str = Field(alias="name") + type: AuthenticationType + name: str """ Descriptive name for the resource. 
""" - platform: Optional[Platform] = Field(default=None, alias="platform") - input: AuthInputPartial = Field(alias="input") - created_at: str = Field(alias="createdAt") + platform: Optional[Platform] = None + input: AuthInputPartial + created_at: str """ Date of creation in RFC 3339 format. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. """ model_config = ConfigDict( @@ -45,6 +59,7 @@ class Authentication(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication_create.py b/algoliasearch/ingestion/models/authentication_create.py index 9b71c6c31..43ad4e5d6 100644 --- a/algoliasearch/ingestion/models/authentication_create.py +++ b/algoliasearch/ingestion/models/authentication_create.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,23 +22,35 @@ from algoliasearch.ingestion.models.authentication_type import AuthenticationType from algoliasearch.ingestion.models.platform import Platform +_ALIASES = { + "type": "type", + "name": "name", + "platform": "platform", + "input": "input", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class AuthenticationCreate(BaseModel): """ Request body for creating a new authentication resource. """ - type: AuthenticationType = Field(alias="type") - name: str = Field(alias="name") + type: AuthenticationType + name: str """ Descriptive name for the resource. 
""" - platform: Optional[Platform] = Field(default=None, alias="platform") - input: AuthInput = Field(alias="input") + platform: Optional[Platform] = None + input: AuthInput model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication_create_response.py b/algoliasearch/ingestion/models/authentication_create_response.py index 2c46772a5..2bc641e75 100644 --- a/algoliasearch/ingestion/models/authentication_create_response.py +++ b/algoliasearch/ingestion/models/authentication_create_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "authentication_id": "authenticationID", + "name": "name", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthenticationCreateResponse(BaseModel): """ API response for the successful creation of an authentication resource. """ - authentication_id: str = Field(alias="authenticationID") + authentication_id: str """ Universally unique identifier (UUID) of an authentication resource. """ - name: str = Field(alias="name") + name: str """ Descriptive name for the resource. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class AuthenticationCreateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication_search.py b/algoliasearch/ingestion/models/authentication_search.py index cd87f5c92..956e2631b 100644 --- a/algoliasearch/ingestion/models/authentication_search.py +++ b/algoliasearch/ingestion/models/authentication_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "authentication_ids": "authenticationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthenticationSearch(BaseModel): """ Request body for searching for authentication resources. 
""" - authentication_ids: List[str] = Field(alias="authenticationIDs") + authentication_ids: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication_update.py b/algoliasearch/ingestion/models/authentication_update.py index c888e32c7..0d0877ccd 100644 --- a/algoliasearch/ingestion/models/authentication_update.py +++ b/algoliasearch/ingestion/models/authentication_update.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,23 +22,35 @@ from algoliasearch.ingestion.models.authentication_type import AuthenticationType from algoliasearch.ingestion.models.platform import Platform +_ALIASES = { + "type": "type", + "name": "name", + "platform": "platform", + "input": "input", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class AuthenticationUpdate(BaseModel): """ Request body for updating an authentication resource. """ - type: Optional[AuthenticationType] = Field(default=None, alias="type") - name: Optional[str] = Field(default=None, alias="name") + type: Optional[AuthenticationType] = None + name: Optional[str] = None """ Descriptive name for the resource. 
""" - platform: Optional[Platform] = Field(default=None, alias="platform") - input: Optional[AuthInputPartial] = Field(default=None, alias="input") + platform: Optional[Platform] = None + input: Optional[AuthInputPartial] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/authentication_update_response.py b/algoliasearch/ingestion/models/authentication_update_response.py index 055d1e6e2..712ca10db 100644 --- a/algoliasearch/ingestion/models/authentication_update_response.py +++ b/algoliasearch/ingestion/models/authentication_update_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "authentication_id": "authenticationID", + "name": "name", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AuthenticationUpdateResponse(BaseModel): """ API response for a successful update of an authentication resource. """ - authentication_id: str = Field(alias="authenticationID") + authentication_id: str """ Universally unique identifier (UUID) of an authentication resource. """ - name: str = Field(alias="name") + name: str """ Descriptive name for the resource. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class AuthenticationUpdateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/big_commerce_channel.py b/algoliasearch/ingestion/models/big_commerce_channel.py index 788306b4d..ceea45a63 100644 --- a/algoliasearch/ingestion/models/big_commerce_channel.py +++ b/algoliasearch/ingestion/models/big_commerce_channel.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "id": "id", + "currencies": "currencies", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class BigCommerceChannel(BaseModel): """ BigCommerceChannel """ - id: int = Field(alias="id") + id: int """ ID of the BigCommerce channel. """ - currencies: Optional[List[str]] = Field(default=None, alias="currencies") + currencies: Optional[List[str]] = None """ Currencies for the given channel. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class BigCommerceChannel(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/big_commerce_metafield.py b/algoliasearch/ingestion/models/big_commerce_metafield.py index 7adb8f177..70690a262 100644 --- a/algoliasearch/ingestion/models/big_commerce_metafield.py +++ b/algoliasearch/ingestion/models/big_commerce_metafield.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "namespace": "namespace", + "key": "key", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class BigCommerceMetafield(BaseModel): """ BigCommerceMetafield """ - namespace: str = Field(alias="namespace") + namespace: str """ Namespace of the metafield. """ - key: str = Field(alias="key") + key: str """ Key identifier of the metafield. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class BigCommerceMetafield(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/commercetools_custom_fields.py b/algoliasearch/ingestion/models/commercetools_custom_fields.py index 5d5c1e4f1..38b7c5887 100644 --- a/algoliasearch/ingestion/models/commercetools_custom_fields.py +++ b/algoliasearch/ingestion/models/commercetools_custom_fields.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "inventory": "inventory", + "price": "price", + "category": "category", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class CommercetoolsCustomFields(BaseModel): """ Custom fields from commercetools to add to the records. For more information, see [Using Custom Types and Custom Fields](https://docs.commercetools.com/tutorials/custom-types). """ - inventory: Optional[List[str]] = Field(default=None, alias="inventory") + inventory: Optional[List[str]] = None """ Inventory custom fields. """ - price: Optional[List[str]] = Field(default=None, alias="price") + price: Optional[List[str]] = None """ Price custom fields. """ - category: Optional[List[str]] = Field(default=None, alias="category") + category: Optional[List[str]] = None """ Category custom fields. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class CommercetoolsCustomFields(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/delete_response.py b/algoliasearch/ingestion/models/delete_response.py index 006891723..991f66d13 100644 --- a/algoliasearch/ingestion/models/delete_response.py +++ b/algoliasearch/ingestion/models/delete_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeleteResponse(BaseModel): """ DeleteResponse """ - deleted_at: str = Field(alias="deletedAt") + deleted_at: str """ Date of deletion in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class DeleteResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination.py b/algoliasearch/ingestion/models/destination.py index f67a75491..9a4c316ba 100644 --- a/algoliasearch/ingestion/models/destination.py +++ b/algoliasearch/ingestion/models/destination.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,33 +21,47 @@ from algoliasearch.ingestion.models.destination_input import DestinationInput from algoliasearch.ingestion.models.destination_type import DestinationType +_ALIASES = { + "destination_id": "destinationID", + "type": "type", + "name": "name", + "input": "input", + "created_at": "createdAt", + "updated_at": "updatedAt", + "authentication_id": "authenticationID", + "transformation_ids": "transformationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Destination(BaseModel): """ Destinations are Algolia resources like indices or event streams. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - type: DestinationType = Field(alias="type") - name: str = Field(alias="name") + type: DestinationType + name: str """ Descriptive name for the resource. """ - input: DestinationInput = Field(alias="input") - created_at: str = Field(alias="createdAt") + input: DestinationInput + created_at: str """ Date of creation in RFC 3339 format. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. 
""" - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. """ - transformation_ids: Optional[List[str]] = Field( - default=None, alias="transformationIDs" - ) + transformation_ids: Optional[List[str]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_create.py b/algoliasearch/ingestion/models/destination_create.py index a8ebc29f2..2d8f3a439 100644 --- a/algoliasearch/ingestion/models/destination_create.py +++ b/algoliasearch/ingestion/models/destination_create.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,27 +21,38 @@ from algoliasearch.ingestion.models.destination_input import DestinationInput from algoliasearch.ingestion.models.destination_type import DestinationType +_ALIASES = { + "type": "type", + "name": "name", + "input": "input", + "authentication_id": "authenticationID", + "transformation_ids": "transformationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DestinationCreate(BaseModel): """ API request body for creating a new destination. """ - type: DestinationType = Field(alias="type") - name: str = Field(alias="name") + type: DestinationType + name: str """ Descriptive name for the resource. """ - input: DestinationInput = Field(alias="input") - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + input: DestinationInput + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. 
""" - transformation_ids: Optional[List[str]] = Field( - default=None, alias="transformationIDs" - ) + transformation_ids: Optional[List[str]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_create_response.py b/algoliasearch/ingestion/models/destination_create_response.py index 3672a4a99..d5a9be66b 100644 --- a/algoliasearch/ingestion/models/destination_create_response.py +++ b/algoliasearch/ingestion/models/destination_create_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "destination_id": "destinationID", + "name": "name", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DestinationCreateResponse(BaseModel): """ API response for creating a new destination. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - name: str = Field(alias="name") + name: str """ Descriptive name for the resource. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class DestinationCreateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_index_name.py b/algoliasearch/ingestion/models/destination_index_name.py index b4cdd6bb8..24663ee60 100644 --- a/algoliasearch/ingestion/models/destination_index_name.py +++ b/algoliasearch/ingestion/models/destination_index_name.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,26 @@ from algoliasearch.ingestion.models.record_type import RecordType +_ALIASES = { + "index_name": "indexName", + "record_type": "recordType", + "attributes_to_exclude": "attributesToExclude", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DestinationIndexName(BaseModel): """ DestinationIndexName """ - index_name: str = Field(alias="indexName") + index_name: str """ Algolia index name (case-sensitive). """ - record_type: Optional[RecordType] = Field(default=None, alias="recordType") - attributes_to_exclude: Optional[List[str]] = Field( - default=None, alias="attributesToExclude" - ) + record_type: Optional[RecordType] = None + attributes_to_exclude: Optional[List[str]] = None """ Attributes from your source to exclude from Algolia records. Not all your data attributes will be useful for searching. Keeping your Algolia records small increases indexing and search performance. - Exclude nested attributes with `.` notation. For example, `foo.bar` indexes the `foo` attribute and all its children **except** the `bar` attribute. - Exclude attributes from arrays with `[i]`, where `i` is the index of the array element. 
For example, `foo.[0].bar` only excludes the `bar` attribute from the first element of the `foo` array, but indexes the complete `foo` attribute for all other elements. Use `*` as wildcard: `foo.[*].bar` excludes `bar` from all elements of the `foo` array. """ model_config = ConfigDict( @@ -39,6 +47,7 @@ class DestinationIndexName(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_input.py b/algoliasearch/ingestion/models/destination_input.py index 8bed22f89..464813e18 100644 --- a/algoliasearch/ingestion/models/destination_input.py +++ b/algoliasearch/ingestion/models/destination_input.py @@ -28,7 +28,7 @@ class DestinationInput(BaseModel): oneof_schema_1_validator: Optional[DestinationIndexName] = Field(default=None) - actual_instance: Optional[Union[DestinationIndexName]] = None + actual_instance: Union[DestinationIndexName, None] = None one_of_schemas: Set[str] = {"DestinationIndexName"} def __init__(self, *args, **kwargs) -> None: @@ -41,12 +41,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[DestinationIndexName]]: + def unwrap_actual_instance(self) -> Union[DestinationIndexName, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -81,9 +81,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -93,8 +93,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], DestinationIndexName]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/destination_search.py b/algoliasearch/ingestion/models/destination_search.py index de77cbdce..3e1b7256c 100644 --- a/algoliasearch/ingestion/models/destination_search.py +++ b/algoliasearch/ingestion/models/destination_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "destination_ids": "destinationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DestinationSearch(BaseModel): """ API request body for searching destinations. 
""" - destination_ids: List[str] = Field(alias="destinationIDs") + destination_ids: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_update.py b/algoliasearch/ingestion/models/destination_update.py index bfdb77327..26fc9aa63 100644 --- a/algoliasearch/ingestion/models/destination_update.py +++ b/algoliasearch/ingestion/models/destination_update.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,27 +21,38 @@ from algoliasearch.ingestion.models.destination_input import DestinationInput from algoliasearch.ingestion.models.destination_type import DestinationType +_ALIASES = { + "type": "type", + "name": "name", + "input": "input", + "authentication_id": "authenticationID", + "transformation_ids": "transformationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DestinationUpdate(BaseModel): """ API request body for updating a destination. """ - type: Optional[DestinationType] = Field(default=None, alias="type") - name: Optional[str] = Field(default=None, alias="name") + type: Optional[DestinationType] = None + name: Optional[str] = None """ Descriptive name for the resource. """ - input: Optional[DestinationInput] = Field(default=None, alias="input") - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + input: Optional[DestinationInput] = None + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. 
""" - transformation_ids: Optional[List[str]] = Field( - default=None, alias="transformationIDs" - ) + transformation_ids: Optional[List[str]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/destination_update_response.py b/algoliasearch/ingestion/models/destination_update_response.py index 4f9ad8fd5..d0e764431 100644 --- a/algoliasearch/ingestion/models/destination_update_response.py +++ b/algoliasearch/ingestion/models/destination_update_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "destination_id": "destinationID", + "name": "name", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DestinationUpdateResponse(BaseModel): """ API response for updating a destination. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - name: str = Field(alias="name") + name: str """ Descriptive name for the resource. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class DestinationUpdateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/docker_streams.py b/algoliasearch/ingestion/models/docker_streams.py index 7bf6aecd9..954fa69e6 100644 --- a/algoliasearch/ingestion/models/docker_streams.py +++ b/algoliasearch/ingestion/models/docker_streams.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,23 +22,34 @@ DockerStreamsSyncMode, ) +_ALIASES = { + "name": "name", + "properties": "properties", + "sync_mode": "syncMode", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DockerStreams(BaseModel): """ DockerStreams """ - name: str = Field(alias="name") + name: str """ The name of the stream to fetch the data from (e.g. table name). """ - properties: Optional[List[str]] = Field(default=None, alias="properties") + properties: Optional[List[str]] = None """ The properties of the stream to select (e.g. column). 
""" - sync_mode: DockerStreamsSyncMode = Field(alias="syncMode") + sync_mode: DockerStreamsSyncMode model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/docker_streams_input.py b/algoliasearch/ingestion/models/docker_streams_input.py index 5e2feb5c4..d32bcf464 100644 --- a/algoliasearch/ingestion/models/docker_streams_input.py +++ b/algoliasearch/ingestion/models/docker_streams_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.ingestion.models.docker_streams import DockerStreams +_ALIASES = { + "streams": "streams", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DockerStreamsInput(BaseModel): """ The selected streams of a singer or airbyte connector. 
""" - streams: List[DockerStreams] = Field(alias="streams") + streams: List[DockerStreams] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/error_base.py b/algoliasearch/ingestion/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/ingestion/models/error_base.py +++ b/algoliasearch/ingestion/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/ingestion/models/event.py b/algoliasearch/ingestion/models/event.py index 278dfa3bd..7f9585105 100644 --- a/algoliasearch/ingestion/models/event.py +++ b/algoliasearch/ingestion/models/event.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,24 +21,39 @@ from algoliasearch.ingestion.models.event_status import EventStatus from algoliasearch.ingestion.models.event_type import EventType +_ALIASES = { + "event_id": "eventID", + "run_id": "runID", + "parent_id": "parentID", + "status": "status", + "type": "type", + "batch_size": 
"batchSize", + "data": "data", + "published_at": "publishedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Event(BaseModel): """ An event describe a step of the task execution flow.. """ - event_id: str = Field(alias="eventID") + event_id: str """ Universally unique identifier (UUID) of an event. """ - run_id: str = Field(alias="runID") + run_id: str """ Universally unique identifier (UUID) of a task run. """ - parent_id: Optional[str] = Field(default=None, alias="parentID") + parent_id: Optional[str] = None """ The parent event, the cause of this event. """ - status: EventStatus = Field(alias="status") - type: EventType = Field(alias="type") - batch_size: int = Field(alias="batchSize") + status: EventStatus + type: EventType + batch_size: int """ The extracted record batch size. """ - data: Optional[Dict[str, object]] = Field(default=None, alias="data") - published_at: str = Field(alias="publishedAt") + data: Optional[Dict[str, object]] = None + published_at: str """ Date of publish RFC 3339 format. 
""" model_config = ConfigDict( @@ -46,6 +61,7 @@ class Event(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_authentications_response.py b/algoliasearch/ingestion/models/list_authentications_response.py index 2f121b094..ff7aa142c 100644 --- a/algoliasearch/ingestion/models/list_authentications_response.py +++ b/algoliasearch/ingestion/models/list_authentications_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.authentication import Authentication from algoliasearch.ingestion.models.pagination import Pagination +_ALIASES = { + "authentications": "authentications", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListAuthenticationsResponse(BaseModel): """ ListAuthenticationsResponse """ - authentications: List[Authentication] = Field(alias="authentications") - pagination: Pagination = Field(alias="pagination") + authentications: List[Authentication] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_destinations_response.py b/algoliasearch/ingestion/models/list_destinations_response.py index 28b9ced85..69989a727 100644 --- a/algoliasearch/ingestion/models/list_destinations_response.py +++ b/algoliasearch/ingestion/models/list_destinations_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, 
Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.destination import Destination from algoliasearch.ingestion.models.pagination import Pagination +_ALIASES = { + "destinations": "destinations", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListDestinationsResponse(BaseModel): """ ListDestinationsResponse """ - destinations: List[Destination] = Field(alias="destinations") - pagination: Pagination = Field(alias="pagination") + destinations: List[Destination] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_events_response.py b/algoliasearch/ingestion/models/list_events_response.py index 40866f464..9da5e054d 100644 --- a/algoliasearch/ingestion/models/list_events_response.py +++ b/algoliasearch/ingestion/models/list_events_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,21 +22,32 @@ from algoliasearch.ingestion.models.pagination import Pagination from algoliasearch.ingestion.models.window import Window +_ALIASES = { + "events": "events", + "pagination": "pagination", + "window": "window", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListEventsResponse(BaseModel): """ ListEventsResponse """ - events: List[Event] = Field(alias="events") - pagination: Pagination = Field(alias="pagination") - window: Window = Field(alias="window") + events: List[Event] + pagination: Pagination + window: Window model_config = ConfigDict( 
use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_sources_response.py b/algoliasearch/ingestion/models/list_sources_response.py index 683c9aed0..ef5986605 100644 --- a/algoliasearch/ingestion/models/list_sources_response.py +++ b/algoliasearch/ingestion/models/list_sources_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.pagination import Pagination from algoliasearch.ingestion.models.source import Source +_ALIASES = { + "sources": "sources", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListSourcesResponse(BaseModel): """ ListSourcesResponse """ - sources: List[Source] = Field(alias="sources") - pagination: Pagination = Field(alias="pagination") + sources: List[Source] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_tasks_response.py b/algoliasearch/ingestion/models/list_tasks_response.py index 775fb351d..7c4cdd6d2 100644 --- a/algoliasearch/ingestion/models/list_tasks_response.py +++ b/algoliasearch/ingestion/models/list_tasks_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.pagination import Pagination from 
algoliasearch.ingestion.models.task import Task +_ALIASES = { + "tasks": "tasks", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListTasksResponse(BaseModel): """ Configured tasks and pagination information. """ - tasks: List[Task] = Field(alias="tasks") - pagination: Pagination = Field(alias="pagination") + tasks: List[Task] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_tasks_response_v1.py b/algoliasearch/ingestion/models/list_tasks_response_v1.py index 02f66ee59..73e965796 100644 --- a/algoliasearch/ingestion/models/list_tasks_response_v1.py +++ b/algoliasearch/ingestion/models/list_tasks_response_v1.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.pagination import Pagination from algoliasearch.ingestion.models.task_v1 import TaskV1 +_ALIASES = { + "tasks": "tasks", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListTasksResponseV1(BaseModel): """ Configured tasks and pagination information. 
""" - tasks: List[TaskV1] = Field(alias="tasks") - pagination: Pagination = Field(alias="pagination") + tasks: List[TaskV1] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/list_transformations_response.py b/algoliasearch/ingestion/models/list_transformations_response.py index 2262b6b03..3a76a2f2c 100644 --- a/algoliasearch/ingestion/models/list_transformations_response.py +++ b/algoliasearch/ingestion/models/list_transformations_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.pagination import Pagination from algoliasearch.ingestion.models.transformation import Transformation +_ALIASES = { + "transformations": "transformations", + "pagination": "pagination", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListTransformationsResponse(BaseModel): """ Configured transformations and pagination information. 
""" - transformations: List[Transformation] = Field(alias="transformations") - pagination: Pagination = Field(alias="pagination") + transformations: List[Transformation] + pagination: Pagination model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/mapping_field_directive.py b/algoliasearch/ingestion/models/mapping_field_directive.py index ee75acbce..d5ae23a33 100644 --- a/algoliasearch/ingestion/models/mapping_field_directive.py +++ b/algoliasearch/ingestion/models/mapping_field_directive.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "field_key": "fieldKey", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class MappingFieldDirective(BaseModel): """ Describes how a field should be resolved by applying a set of directives. """ - field_key: str = Field(alias="fieldKey") + field_key: str """ Destination field key. """ - value: Dict[str, object] = Field(alias="value") + value: Dict[str, object] """ How the destination field should be resolved from the source. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class MappingFieldDirective(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/mapping_input.py b/algoliasearch/ingestion/models/mapping_input.py index fe8159c2a..e28df8568 100644 --- a/algoliasearch/ingestion/models/mapping_input.py +++ b/algoliasearch/ingestion/models/mapping_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.mapping_format_schema import MappingFormatSchema from algoliasearch.ingestion.models.mapping_kit_action import MappingKitAction +_ALIASES = { + "format": "format", + "actions": "actions", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class MappingInput(BaseModel): """ Transformations to apply to the source, serialized as a JSON string. 
""" - format: MappingFormatSchema = Field(alias="format") - actions: List[MappingKitAction] = Field(alias="actions") + format: MappingFormatSchema + actions: List[MappingKitAction] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/mapping_kit_action.py b/algoliasearch/ingestion/models/mapping_kit_action.py index 1cf3285be..c0d1e5d41 100644 --- a/algoliasearch/ingestion/models/mapping_kit_action.py +++ b/algoliasearch/ingestion/models/mapping_kit_action.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,25 +20,37 @@ from algoliasearch.ingestion.models.mapping_field_directive import MappingFieldDirective +_ALIASES = { + "id": "id", + "enabled": "enabled", + "trigger": "trigger", + "field_directives": "fieldDirectives", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class MappingKitAction(BaseModel): """ Describes how a destination object should be resolved by means of applying a set of directives. """ - id: Optional[str] = Field(default=None, alias="id") + id: Optional[str] = None """ ID to uniquely identify this action. """ - enabled: bool = Field(alias="enabled") + enabled: bool """ Whether this action has any effect. """ - trigger: str = Field(alias="trigger") + trigger: str """ Condition which must be satisfied to apply the action. If this evaluates to false, the action is not applied, and the process attempts to apply the next action, if any. 
""" - field_directives: List[MappingFieldDirective] = Field(alias="fieldDirectives") + field_directives: List[MappingFieldDirective] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/on_demand_trigger.py b/algoliasearch/ingestion/models/on_demand_trigger.py index 0afe069a4..c6bb7dc56 100644 --- a/algoliasearch/ingestion/models/on_demand_trigger.py +++ b/algoliasearch/ingestion/models/on_demand_trigger.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,14 +20,23 @@ from algoliasearch.ingestion.models.on_demand_trigger_type import OnDemandTriggerType +_ALIASES = { + "type": "type", + "last_run": "lastRun", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class OnDemandTrigger(BaseModel): """ Trigger information for manually-triggered tasks. """ - type: OnDemandTriggerType = Field(alias="type") - last_run: Optional[str] = Field(default=None, alias="lastRun") + type: OnDemandTriggerType + last_run: Optional[str] = None """ The last time the scheduled task ran in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +44,7 @@ class OnDemandTrigger(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/on_demand_trigger_input.py b/algoliasearch/ingestion/models/on_demand_trigger_input.py index 743ceaca1..e472ca847 100644 --- a/algoliasearch/ingestion/models/on_demand_trigger_input.py +++ b/algoliasearch/ingestion/models/on_demand_trigger_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.ingestion.models.on_demand_trigger_type import OnDemandTriggerType +_ALIASES = { + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class OnDemandTriggerInput(BaseModel): """ Trigger information for manually-triggered tasks. 
""" - type: OnDemandTriggerType = Field(alias="type") + type: OnDemandTriggerType model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/pagination.py b/algoliasearch/ingestion/models/pagination.py index 1ca0f6236..6b41ab821 100644 --- a/algoliasearch/ingestion/models/pagination.py +++ b/algoliasearch/ingestion/models/pagination.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "nb_pages": "nbPages", + "page": "page", + "nb_items": "nbItems", + "items_per_page": "itemsPerPage", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Pagination(BaseModel): """ Paginated API response. """ - nb_pages: int = Field(alias="nbPages") + nb_pages: int """ Number of pages in the API response. """ - page: int = Field(alias="page") + page: int """ Page of the API response to retrieve. """ - nb_items: int = Field(alias="nbItems") + nb_items: int """ Number of items in the API response. """ - items_per_page: int = Field(alias="itemsPerPage") + items_per_page: int """ Number of items per page. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class Pagination(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/platform_with_none.py b/algoliasearch/ingestion/models/platform_with_none.py index 4d368b32e..4d16e5c66 100644 --- a/algoliasearch/ingestion/models/platform_with_none.py +++ b/algoliasearch/ingestion/models/platform_with_none.py @@ -31,7 +31,7 @@ class PlatformWithNone(BaseModel): oneof_schema_2_validator: Optional[PlatformNone] = Field(default=None) - actual_instance: Optional[Union[Platform, PlatformNone]] = None + actual_instance: Union[Platform, PlatformNone, None] = None one_of_schemas: Set[str] = {"Platform", "PlatformNone"} def __init__(self, *args, **kwargs) -> None: @@ -44,12 +44,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[Platform, PlatformNone]]: + def unwrap_actual_instance(self) -> Union[Platform, PlatformNone, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], Platform, PlatformNone]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/push_task_payload.py b/algoliasearch/ingestion/models/push_task_payload.py index b0dd7f274..fd2488d17 100644 --- a/algoliasearch/ingestion/models/push_task_payload.py +++ b/algoliasearch/ingestion/models/push_task_payload.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.action import Action from algoliasearch.ingestion.models.push_task_records import PushTaskRecords +_ALIASES = { + "action": "action", + "records": "records", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class PushTaskPayload(BaseModel): """ PushTaskPayload """ - action: Action = Field(alias="action") - records: List[PushTaskRecords] = Field(alias="records") + action: Action + records: List[PushTaskRecords] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git 
a/algoliasearch/ingestion/models/push_task_records.py b/algoliasearch/ingestion/models/push_task_records.py index 04af5790f..ef9d7d7b1 100644 --- a/algoliasearch/ingestion/models/push_task_records.py +++ b/algoliasearch/ingestion/models/push_task_records.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class PushTaskRecords(BaseModel): """ PushTaskRecords """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique record identifier. """ model_config = ConfigDict( @@ -31,6 +40,7 @@ class PushTaskRecords(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/ingestion/models/run.py b/algoliasearch/ingestion/models/run.py index 9bd50327a..be8524fa6 100644 --- a/algoliasearch/ingestion/models/run.py +++ b/algoliasearch/ingestion/models/run.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -24,31 +24,51 @@ from algoliasearch.ingestion.models.run_status import RunStatus from algoliasearch.ingestion.models.run_type import RunType +_ALIASES = { + "run_id": "runID", + "app_id": "appID", + "task_id": "taskID", + "status": "status", + "progress": "progress", + "outcome": "outcome", + "failure_threshold": "failureThreshold", + "reason": "reason", + "reason_code": "reasonCode", + "type": "type", + "created_at": "createdAt", + "started_at": "startedAt", + "finished_at": "finishedAt", +} + + +def 
_alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Run(BaseModel): """ Run """ - run_id: str = Field(alias="runID") + run_id: str """ Universally unique identifier (UUID) of a task run. """ - app_id: str = Field(alias="appID") - task_id: str = Field(alias="taskID") + app_id: str + task_id: str """ Universally unique identifier (UUID) of a task. """ - status: RunStatus = Field(alias="status") - progress: Optional[RunProgress] = Field(default=None, alias="progress") - outcome: Optional[RunOutcome] = Field(default=None, alias="outcome") - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + status: RunStatus + progress: Optional[RunProgress] = None + outcome: Optional[RunOutcome] = None + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ - reason: Optional[str] = Field(default=None, alias="reason") + reason: Optional[str] = None """ More information about the task run's outcome. """ - reason_code: Optional[RunReasonCode] = Field(default=None, alias="reasonCode") - type: RunType = Field(alias="type") - created_at: str = Field(alias="createdAt") + reason_code: Optional[RunReasonCode] = None + type: RunType + created_at: str """ Date of creation in RFC 3339 format. """ - started_at: Optional[str] = Field(default=None, alias="startedAt") + started_at: Optional[str] = None """ Date of start in RFC 3339 format. """ - finished_at: Optional[str] = Field(default=None, alias="finishedAt") + finished_at: Optional[str] = None """ Date of finish in RFC 3339 format. 
""" model_config = ConfigDict( @@ -56,6 +76,7 @@ class Run(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/run_list_response.py b/algoliasearch/ingestion/models/run_list_response.py index 382550a0c..1cb94178d 100644 --- a/algoliasearch/ingestion/models/run_list_response.py +++ b/algoliasearch/ingestion/models/run_list_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,21 +22,32 @@ from algoliasearch.ingestion.models.run import Run from algoliasearch.ingestion.models.window import Window +_ALIASES = { + "runs": "runs", + "pagination": "pagination", + "window": "window", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RunListResponse(BaseModel): """ RunListResponse """ - runs: List[Run] = Field(alias="runs") - pagination: Pagination = Field(alias="pagination") - window: Window = Field(alias="window") + runs: List[Run] + pagination: Pagination + window: Window model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/run_progress.py b/algoliasearch/ingestion/models/run_progress.py index 335c7df2a..75e85cc13 100644 --- a/algoliasearch/ingestion/models/run_progress.py +++ b/algoliasearch/ingestion/models/run_progress.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,23 +18,30 @@ from typing_extensions import Self +_ALIASES 
= { + "expected_nb_of_events": "expectedNbOfEvents", + "received_nb_of_events": "receivedNbOfEvents", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RunProgress(BaseModel): """ RunProgress """ - expected_nb_of_events: Optional[int] = Field( - default=None, alias="expectedNbOfEvents" - ) - received_nb_of_events: Optional[int] = Field( - default=None, alias="receivedNbOfEvents" - ) + expected_nb_of_events: Optional[int] = None + received_nb_of_events: Optional[int] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/run_response.py b/algoliasearch/ingestion/models/run_response.py index 94c5492c6..acf0aaf17 100644 --- a/algoliasearch/ingestion/models/run_response.py +++ b/algoliasearch/ingestion/models/run_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "run_id": "runID", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RunResponse(BaseModel): """ API response for running a task. """ - run_id: str = Field(alias="runID") + run_id: str """ Universally unique identifier (UUID) of a task run. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class RunResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/run_source_payload.py b/algoliasearch/ingestion/models/run_source_payload.py index 8eeefdb28..a8d73878c 100644 --- a/algoliasearch/ingestion/models/run_source_payload.py +++ b/algoliasearch/ingestion/models/run_source_payload.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,25 +20,37 @@ from algoliasearch.ingestion.models.entity_type import EntityType +_ALIASES = { + "index_to_include": "indexToInclude", + "index_to_exclude": "indexToExclude", + "entity_ids": "entityIDs", + "entity_type": "entityType", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RunSourcePayload(BaseModel): """ RunSourcePayload """ - index_to_include: Optional[List[str]] = Field(default=None, alias="indexToInclude") + index_to_include: Optional[List[str]] = None """ List of index names to include in reidexing/update. """ - index_to_exclude: Optional[List[str]] = Field(default=None, alias="indexToExclude") + index_to_exclude: Optional[List[str]] = None """ List of index names to exclude in reidexing/update. """ - entity_ids: Optional[List[str]] = Field(default=None, alias="entityIDs") + entity_ids: Optional[List[str]] = None """ List of entityID to update. 
""" - entity_type: Optional[EntityType] = Field(default=None, alias="entityType") + entity_type: Optional[EntityType] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/run_source_response.py b/algoliasearch/ingestion/models/run_source_response.py index 5ee76ca3c..5f33fa1da 100644 --- a/algoliasearch/ingestion/models/run_source_response.py +++ b/algoliasearch/ingestion/models/run_source_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_with_run_id": "taskWithRunID", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RunSourceResponse(BaseModel): """ RunSourceResponse """ - task_with_run_id: Dict[str, str] = Field(alias="taskWithRunID") + task_with_run_id: Dict[str, str] """ Map of taskID sent for reindex with the corresponding runID. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class RunSourceResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/schedule_trigger.py b/algoliasearch/ingestion/models/schedule_trigger.py index 00be9f92d..b01e7ab14 100644 --- a/algoliasearch/ingestion/models/schedule_trigger.py +++ b/algoliasearch/ingestion/models/schedule_trigger.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.ingestion.models.schedule_trigger_type import ScheduleTriggerType +_ALIASES = { + "type": "type", + "cron": "cron", + "last_run": "lastRun", + "next_run": "nextRun", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ScheduleTrigger(BaseModel): """ Trigger information for scheduled tasks. """ - type: ScheduleTriggerType = Field(alias="type") - cron: str = Field(alias="cron") + type: ScheduleTriggerType + cron: str """ Cron expression for the task's schedule. """ - last_run: Optional[str] = Field(default=None, alias="lastRun") + last_run: Optional[str] = None """ The last time the scheduled task ran in RFC 3339 format. """ - next_run: str = Field(alias="nextRun") + next_run: str """ The next scheduled run of the task in RFC 3339 format. 
""" model_config = ConfigDict( @@ -39,6 +50,7 @@ class ScheduleTrigger(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/schedule_trigger_input.py b/algoliasearch/ingestion/models/schedule_trigger_input.py index 99d8174c4..99ba16d3a 100644 --- a/algoliasearch/ingestion/models/schedule_trigger_input.py +++ b/algoliasearch/ingestion/models/schedule_trigger_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,14 +20,23 @@ from algoliasearch.ingestion.models.schedule_trigger_type import ScheduleTriggerType +_ALIASES = { + "type": "type", + "cron": "cron", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ScheduleTriggerInput(BaseModel): """ Trigger input for scheduled tasks. """ - type: ScheduleTriggerType = Field(alias="type") - cron: str = Field(alias="cron") + type: ScheduleTriggerType + cron: str """ Cron expression for the task's schedule. 
""" model_config = ConfigDict( @@ -35,6 +44,7 @@ class ScheduleTriggerInput(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/shopify_input.py b/algoliasearch/ingestion/models/shopify_input.py index ffd294381..cd0209254 100644 --- a/algoliasearch/ingestion/models/shopify_input.py +++ b/algoliasearch/ingestion/models/shopify_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.ingestion.models.shopify_market import ShopifyMarket from algoliasearch.ingestion.models.shopify_metafield import ShopifyMetafield +_ALIASES = { + "metafields": "metafields", + "market": "market", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ShopifyInput(BaseModel): """ Represents the required elements of the task input when using a `shopify` source. 
""" - metafields: List[ShopifyMetafield] = Field(alias="metafields") - market: ShopifyMarket = Field(alias="market") + metafields: List[ShopifyMetafield] + market: ShopifyMarket model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/shopify_market.py b/algoliasearch/ingestion/models/shopify_market.py index 8f4950bba..2a9dd43eb 100644 --- a/algoliasearch/ingestion/models/shopify_market.py +++ b/algoliasearch/ingestion/models/shopify_market.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,20 +18,32 @@ from typing_extensions import Self +_ALIASES = { + "countries": "countries", + "currencies": "currencies", + "locales": "locales", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ShopifyMarket(BaseModel): """ Represents a market in Shopify. 
""" - countries: List[str] = Field(alias="countries") - currencies: List[str] = Field(alias="currencies") - locales: List[str] = Field(alias="locales") + countries: List[str] + currencies: List[str] + locales: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/shopify_metafield.py b/algoliasearch/ingestion/models/shopify_metafield.py index 2f3f5ab27..006128ab0 100644 --- a/algoliasearch/ingestion/models/shopify_metafield.py +++ b/algoliasearch/ingestion/models/shopify_metafield.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,20 +18,32 @@ from typing_extensions import Self +_ALIASES = { + "namespace": "namespace", + "key": "key", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ShopifyMetafield(BaseModel): """ Represents a metafield in Shopify. 
""" - namespace: str = Field(alias="namespace") - key: str = Field(alias="key") - value: str = Field(alias="value") + namespace: str + key: str + value: str model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source.py b/algoliasearch/ingestion/models/source.py index 4b1898990..846101cb1 100644 --- a/algoliasearch/ingestion/models/source.py +++ b/algoliasearch/ingestion/models/source.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,22 +21,36 @@ from algoliasearch.ingestion.models.source_input import SourceInput from algoliasearch.ingestion.models.source_type import SourceType +_ALIASES = { + "source_id": "sourceID", + "type": "type", + "name": "name", + "input": "input", + "authentication_id": "authenticationID", + "created_at": "createdAt", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Source(BaseModel): """ Source """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - type: SourceType = Field(alias="type") - name: str = Field(alias="name") - input: Optional[SourceInput] = Field(default=None, alias="input") - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + type: SourceType + name: str + input: Optional[SourceInput] = None + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. """ model_config = ConfigDict( @@ -44,6 +58,7 @@ class Source(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_big_commerce.py b/algoliasearch/ingestion/models/source_big_commerce.py index 360a4010c..9c2394a52 100644 --- a/algoliasearch/ingestion/models/source_big_commerce.py +++ b/algoliasearch/ingestion/models/source_big_commerce.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,28 +21,37 @@ from algoliasearch.ingestion.models.big_commerce_channel import BigCommerceChannel from algoliasearch.ingestion.models.big_commerce_metafield import BigCommerceMetafield +_ALIASES = { + "store_hash": "storeHash", + "channel": "channel", + "custom_fields": "customFields", + "product_metafields": "productMetafields", + "variant_metafields": "variantMetafields", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceBigCommerce(BaseModel): """ SourceBigCommerce """ - store_hash: str = Field(alias="storeHash") + store_hash: str """ Store hash identifying your BigCommerce store. 
""" - channel: Optional[BigCommerceChannel] = Field(default=None, alias="channel") - custom_fields: Optional[List[str]] = Field(default=None, alias="customFields") - product_metafields: Optional[List[BigCommerceMetafield]] = Field( - default=None, alias="productMetafields" - ) - variant_metafields: Optional[List[BigCommerceMetafield]] = Field( - default=None, alias="variantMetafields" - ) + channel: Optional[BigCommerceChannel] = None + custom_fields: Optional[List[str]] = None + product_metafields: Optional[List[BigCommerceMetafield]] = None + variant_metafields: Optional[List[BigCommerceMetafield]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_big_query.py b/algoliasearch/ingestion/models/source_big_query.py index 3ae98d6ea..5bef76efc 100644 --- a/algoliasearch/ingestion/models/source_big_query.py +++ b/algoliasearch/ingestion/models/source_big_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,24 +20,38 @@ from algoliasearch.ingestion.models.big_query_data_type import BigQueryDataType +_ALIASES = { + "project_id": "projectID", + "dataset_id": "datasetID", + "data_type": "dataType", + "table": "table", + "table_prefix": "tablePrefix", + "custom_sql_request": "customSQLRequest", + "unique_id_column": "uniqueIDColumn", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceBigQuery(BaseModel): """ SourceBigQuery """ - project_id: str = Field(alias="projectID") + project_id: str """ Project ID of the BigQuery source. """ - dataset_id: str = Field(alias="datasetID") + dataset_id: str """ Dataset ID of the BigQuery source. 
""" - data_type: Optional[BigQueryDataType] = Field(default=None, alias="dataType") - table: Optional[str] = Field(default=None, alias="table") + data_type: Optional[BigQueryDataType] = None + table: Optional[str] = None """ Table name for the BigQuery export. """ - table_prefix: Optional[str] = Field(default=None, alias="tablePrefix") + table_prefix: Optional[str] = None """ Table prefix for a Google Analytics 4 data export to BigQuery. """ - custom_sql_request: Optional[str] = Field(default=None, alias="customSQLRequest") + custom_sql_request: Optional[str] = None """ Custom SQL request to extract data from the BigQuery table. """ - unique_id_column: Optional[str] = Field(default=None, alias="uniqueIDColumn") + unique_id_column: Optional[str] = None """ Name of a column that contains a unique ID which will be used as `objectID` in Algolia. """ model_config = ConfigDict( @@ -45,6 +59,7 @@ class SourceBigQuery(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_commercetools.py b/algoliasearch/ingestion/models/source_commercetools.py index 8f2923cbb..bcc6208f2 100644 --- a/algoliasearch/ingestion/models/source_commercetools.py +++ b/algoliasearch/ingestion/models/source_commercetools.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,30 +22,40 @@ CommercetoolsCustomFields, ) +_ALIASES = { + "store_keys": "storeKeys", + "locales": "locales", + "url": "url", + "project_key": "projectKey", + "fallback_is_in_stock_value": "fallbackIsInStockValue", + "custom_fields": "customFields", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceCommercetools(BaseModel): """ SourceCommercetools """ 
- store_keys: Optional[List[str]] = Field(default=None, alias="storeKeys") - locales: Optional[List[str]] = Field(default=None, alias="locales") + store_keys: Optional[List[str]] = None + locales: Optional[List[str]] = None """ Locales for your commercetools stores. """ - url: str = Field(alias="url") - project_key: str = Field(alias="projectKey") - fallback_is_in_stock_value: Optional[bool] = Field( - default=None, alias="fallbackIsInStockValue" - ) + url: str + project_key: str + fallback_is_in_stock_value: Optional[bool] = None """ Whether a fallback value is stored in the Algolia record if there's no inventory information about the product. """ - custom_fields: Optional[CommercetoolsCustomFields] = Field( - default=None, alias="customFields" - ) + custom_fields: Optional[CommercetoolsCustomFields] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_create.py b/algoliasearch/ingestion/models/source_create.py index 06cb27313..b27c28d02 100644 --- a/algoliasearch/ingestion/models/source_create.py +++ b/algoliasearch/ingestion/models/source_create.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,17 +21,28 @@ from algoliasearch.ingestion.models.source_input import SourceInput from algoliasearch.ingestion.models.source_type import SourceType +_ALIASES = { + "type": "type", + "name": "name", + "input": "input", + "authentication_id": "authenticationID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceCreate(BaseModel): """ SourceCreate """ - type: SourceType = Field(alias="type") - name: str = Field(alias="name") + type: SourceType + name: 
str """ Descriptive name of the source. """ - input: Optional[SourceInput] = Field(default=None, alias="input") - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + input: Optional[SourceInput] = None + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. """ model_config = ConfigDict( @@ -39,6 +50,7 @@ class SourceCreate(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_create_response.py b/algoliasearch/ingestion/models/source_create_response.py index ef30e472a..272870eb1 100644 --- a/algoliasearch/ingestion/models/source_create_response.py +++ b/algoliasearch/ingestion/models/source_create_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "source_id": "sourceID", + "name": "name", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SourceCreateResponse(BaseModel): """ SourceCreateResponse """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - name: str = Field(alias="name") + name: str """ Descriptive name of the source. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SourceCreateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_csv.py b/algoliasearch/ingestion/models/source_csv.py index 993489a01..ff8a4aaa5 100644 --- a/algoliasearch/ingestion/models/source_csv.py +++ b/algoliasearch/ingestion/models/source_csv.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,32 @@ from algoliasearch.ingestion.models.mapping_type_csv import MappingTypeCSV from algoliasearch.ingestion.models.method_type import MethodType +_ALIASES = { + "url": "url", + "unique_id_column": "uniqueIDColumn", + "mapping": "mapping", + "method": "method", + "delimiter": "delimiter", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceCSV(BaseModel): """ SourceCSV """ - url: str = Field(alias="url") + url: str """ URL of the file. """ - unique_id_column: Optional[str] = Field(default=None, alias="uniqueIDColumn") + unique_id_column: Optional[str] = None """ Name of a column that contains a unique ID which will be used as `objectID` in Algolia. """ - mapping: Optional[Dict[str, MappingTypeCSV]] = Field(default=None, alias="mapping") + mapping: Optional[Dict[str, MappingTypeCSV]] = None """ Key-value pairs of column names and their expected types. """ - method: Optional[MethodType] = Field(default=None, alias="method") - delimiter: Optional[str] = Field(default=None, alias="delimiter") + method: Optional[MethodType] = None + delimiter: Optional[str] = None """ The character used to split the value on each line, default to a comma (\\r, \\n, 0xFFFD, and space are forbidden). 
""" model_config = ConfigDict( @@ -42,6 +54,7 @@ class SourceCSV(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: @@ -69,7 +82,9 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: if not isinstance(obj, dict): return cls.model_validate(obj) - obj["mapping"] = dict((_k, _v) for _k, _v in obj.get("mapping").items()) + mapping = obj.get("mapping") + if mapping is not None: + obj["mapping"] = dict((_k, _v) for _k, _v in mapping.items()) obj["method"] = obj.get("method") return cls.model_validate(obj) diff --git a/algoliasearch/ingestion/models/source_docker.py b/algoliasearch/ingestion/models/source_docker.py index 4087e553c..4129fe423 100644 --- a/algoliasearch/ingestion/models/source_docker.py +++ b/algoliasearch/ingestion/models/source_docker.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,19 +21,31 @@ from algoliasearch.ingestion.models.docker_image_type import DockerImageType from algoliasearch.ingestion.models.docker_registry import DockerRegistry +_ALIASES = { + "image_type": "imageType", + "registry": "registry", + "image": "image", + "version": "version", + "configuration": "configuration", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceDocker(BaseModel): """ SourceDocker """ - image_type: DockerImageType = Field(alias="imageType") - registry: DockerRegistry = Field(alias="registry") - image: str = Field(alias="image") + image_type: DockerImageType + registry: DockerRegistry + image: str """ Docker image name. """ - version: Optional[str] = Field(default=None, alias="version") + version: Optional[str] = None """ Docker image version. 
""" - configuration: object = Field(alias="configuration") + configuration: object """ Configuration of the spec. """ model_config = ConfigDict( @@ -41,6 +53,7 @@ class SourceDocker(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_ga4_big_query_export.py b/algoliasearch/ingestion/models/source_ga4_big_query_export.py index 98dad13d0..e2bc1aefe 100644 --- a/algoliasearch/ingestion/models/source_ga4_big_query_export.py +++ b/algoliasearch/ingestion/models/source_ga4_big_query_export.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "project_id": "projectID", + "dataset_id": "datasetID", + "table_prefix": "tablePrefix", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SourceGA4BigQueryExport(BaseModel): """ SourceGA4BigQueryExport """ - project_id: str = Field(alias="projectID") + project_id: str """ GCP project ID that the BigQuery export writes to. """ - dataset_id: str = Field(alias="datasetID") + dataset_id: str """ BigQuery dataset ID that the BigQuery export writes to. """ - table_prefix: str = Field(alias="tablePrefix") + table_prefix: str """ Prefix of the tables that the BigQuery Export writes to. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SourceGA4BigQueryExport(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_input.py b/algoliasearch/ingestion/models/source_input.py index 68c6cec34..6ecce0e25 100644 --- a/algoliasearch/ingestion/models/source_input.py +++ b/algoliasearch/ingestion/models/source_input.py @@ -51,17 +51,16 @@ class SourceInput(BaseModel): oneof_schema_8_validator: Optional[SourceShopify] = Field(default=None) - actual_instance: Optional[ - Union[ - SourceBigCommerce, - SourceBigQuery, - SourceCSV, - SourceCommercetools, - SourceDocker, - SourceGA4BigQueryExport, - SourceJSON, - SourceShopify, - ] + actual_instance: Union[ + SourceBigCommerce, + SourceBigQuery, + SourceCSV, + SourceCommercetools, + SourceDocker, + SourceGA4BigQueryExport, + SourceJSON, + SourceShopify, + None, ] = None one_of_schemas: Set[str] = { "SourceBigCommerce", @@ -84,24 +83,24 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - SourceBigCommerce, - SourceBigQuery, - SourceCSV, - SourceCommercetools, - SourceDocker, - SourceGA4BigQueryExport, - SourceJSON, - SourceShopify, - ] + ) -> Union[ + SourceBigCommerce, + SourceBigQuery, + SourceCSV, + SourceCommercetools, + SourceDocker, + SourceGA4BigQueryExport, + SourceJSON, + SourceShopify, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -179,9 +178,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -205,8 +204,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/source_json.py b/algoliasearch/ingestion/models/source_json.py index cec4f9dc5..8bfffdd6c 100644 --- a/algoliasearch/ingestion/models/source_json.py +++ b/algoliasearch/ingestion/models/source_json.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,23 +20,34 @@ from algoliasearch.ingestion.models.method_type import MethodType +_ALIASES = { + "url": "url", + "unique_id_column": "uniqueIDColumn", + "method": "method", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceJSON(BaseModel): """ SourceJSON """ - url: str = Field(alias="url") + url: str """ URL of the file. """ - unique_id_column: Optional[str] = Field(default=None, alias="uniqueIDColumn") + unique_id_column: Optional[str] = None """ Name of a column that contains a unique ID which will be used as `objectID` in Algolia. 
""" - method: Optional[MethodType] = Field(default=None, alias="method") + method: Optional[MethodType] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_search.py b/algoliasearch/ingestion/models/source_search.py index d2213d60c..110ce71bf 100644 --- a/algoliasearch/ingestion/models/source_search.py +++ b/algoliasearch/ingestion/models/source_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "source_ids": "sourceIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SourceSearch(BaseModel): """ SourceSearch """ - source_ids: List[str] = Field(alias="sourceIDs") + source_ids: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_shopify.py b/algoliasearch/ingestion/models/source_shopify.py index 040c7ed02..ed553cd97 100644 --- a/algoliasearch/ingestion/models/source_shopify.py +++ b/algoliasearch/ingestion/models/source_shopify.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "feature_flags": "featureFlags", + "shop_url": "shopURL", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class 
SourceShopify(BaseModel): """ SourceShopify """ - feature_flags: Optional[Dict[str, object]] = Field( - default=None, alias="featureFlags" - ) + feature_flags: Optional[Dict[str, object]] = None """ Feature flags for the Shopify source. """ - shop_url: str = Field(alias="shopURL") + shop_url: str """ URL of the Shopify store. """ model_config = ConfigDict( @@ -35,6 +43,7 @@ class SourceShopify(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_update.py b/algoliasearch/ingestion/models/source_update.py index 2fe91c077..e044cba67 100644 --- a/algoliasearch/ingestion/models/source_update.py +++ b/algoliasearch/ingestion/models/source_update.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,16 +20,26 @@ from algoliasearch.ingestion.models.source_update_input import SourceUpdateInput +_ALIASES = { + "name": "name", + "input": "input", + "authentication_id": "authenticationID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceUpdate(BaseModel): """ SourceUpdate """ - name: Optional[str] = Field(default=None, alias="name") + name: Optional[str] = None """ Descriptive name of the source. """ - input: Optional[SourceUpdateInput] = Field(default=None, alias="input") - authentication_id: Optional[str] = Field(default=None, alias="authenticationID") + input: Optional[SourceUpdateInput] = None + authentication_id: Optional[str] = None """ Universally unique identifier (UUID) of an authentication resource. 
""" model_config = ConfigDict( @@ -37,6 +47,7 @@ class SourceUpdate(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_update_commercetools.py b/algoliasearch/ingestion/models/source_update_commercetools.py index 03a8c1cac..a21c1083c 100644 --- a/algoliasearch/ingestion/models/source_update_commercetools.py +++ b/algoliasearch/ingestion/models/source_update_commercetools.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,29 +22,38 @@ CommercetoolsCustomFields, ) +_ALIASES = { + "store_keys": "storeKeys", + "locales": "locales", + "url": "url", + "fallback_is_in_stock_value": "fallbackIsInStockValue", + "custom_fields": "customFields", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceUpdateCommercetools(BaseModel): """ SourceUpdateCommercetools """ - store_keys: Optional[List[str]] = Field(default=None, alias="storeKeys") - locales: Optional[List[str]] = Field(default=None, alias="locales") + store_keys: Optional[List[str]] = None + locales: Optional[List[str]] = None """ Locales for your commercetools stores. """ - url: Optional[str] = Field(default=None, alias="url") - fallback_is_in_stock_value: Optional[bool] = Field( - default=None, alias="fallbackIsInStockValue" - ) + url: Optional[str] = None + fallback_is_in_stock_value: Optional[bool] = None """ Whether a fallback value is stored in the Algolia record if there's no inventory information about the product. 
""" - custom_fields: Optional[CommercetoolsCustomFields] = Field( - default=None, alias="customFields" - ) + custom_fields: Optional[CommercetoolsCustomFields] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_update_docker.py b/algoliasearch/ingestion/models/source_update_docker.py index d2aa19af1..fce3c340d 100644 --- a/algoliasearch/ingestion/models/source_update_docker.py +++ b/algoliasearch/ingestion/models/source_update_docker.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.ingestion.models.docker_registry import DockerRegistry +_ALIASES = { + "registry": "registry", + "image": "image", + "version": "version", + "configuration": "configuration", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceUpdateDocker(BaseModel): """ SourceUpdateDocker """ - registry: Optional[DockerRegistry] = Field(default=None, alias="registry") - image: Optional[str] = Field(default=None, alias="image") + registry: Optional[DockerRegistry] = None + image: Optional[str] = None """ Docker image name. """ - version: Optional[str] = Field(default=None, alias="version") + version: Optional[str] = None """ Docker image version. """ - configuration: object = Field(alias="configuration") + configuration: object """ Configuration of the spec. 
""" model_config = ConfigDict( @@ -39,6 +50,7 @@ class SourceUpdateDocker(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_update_input.py b/algoliasearch/ingestion/models/source_update_input.py index 879f6c6d4..aa66fcbf1 100644 --- a/algoliasearch/ingestion/models/source_update_input.py +++ b/algoliasearch/ingestion/models/source_update_input.py @@ -50,16 +50,15 @@ class SourceUpdateInput(BaseModel): oneof_schema_7_validator: Optional[SourceUpdateShopify] = Field(default=None) - actual_instance: Optional[ - Union[ - SourceBigQuery, - SourceCSV, - SourceGA4BigQueryExport, - SourceJSON, - SourceUpdateCommercetools, - SourceUpdateDocker, - SourceUpdateShopify, - ] + actual_instance: Union[ + SourceBigQuery, + SourceCSV, + SourceGA4BigQueryExport, + SourceJSON, + SourceUpdateCommercetools, + SourceUpdateDocker, + SourceUpdateShopify, + None, ] = None one_of_schemas: Set[str] = { "SourceBigQuery", @@ -81,23 +80,23 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - SourceBigQuery, - SourceCSV, - SourceGA4BigQueryExport, - SourceJSON, - SourceUpdateCommercetools, - SourceUpdateDocker, - SourceUpdateShopify, - ] + ) -> Union[ + SourceBigQuery, + SourceCSV, + SourceGA4BigQueryExport, + SourceJSON, + SourceUpdateCommercetools, + SourceUpdateDocker, + SourceUpdateShopify, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -169,9 +168,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -194,8 +193,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/source_update_response.py b/algoliasearch/ingestion/models/source_update_response.py index ddc8434a3..43048b31f 100644 --- a/algoliasearch/ingestion/models/source_update_response.py +++ b/algoliasearch/ingestion/models/source_update_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "source_id": "sourceID", + "name": "name", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SourceUpdateResponse(BaseModel): """ SourceUpdateResponse """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - name: str = Field(alias="name") + name: str """ Descriptive name of the source. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SourceUpdateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_update_shopify.py b/algoliasearch/ingestion/models/source_update_shopify.py index e23bd89ef..295b29d23 100644 --- a/algoliasearch/ingestion/models/source_update_shopify.py +++ b/algoliasearch/ingestion/models/source_update_shopify.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "feature_flags": "featureFlags", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SourceUpdateShopify(BaseModel): """ SourceUpdateShopify """ - feature_flags: Optional[Dict[str, object]] = Field( - default=None, alias="featureFlags" - ) + feature_flags: Optional[Dict[str, object]] = None """ Feature flags for the Shopify source. 
""" model_config = ConfigDict( @@ -33,6 +40,7 @@ class SourceUpdateShopify(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/source_watch_response.py b/algoliasearch/ingestion/models/source_watch_response.py index 4bcfbec07..d6256c89f 100644 --- a/algoliasearch/ingestion/models/source_watch_response.py +++ b/algoliasearch/ingestion/models/source_watch_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.ingestion.models.event import Event +_ALIASES = { + "run_id": "runID", + "data": "data", + "events": "events", + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceWatchResponse(BaseModel): """ SourceWatchResponse """ - run_id: Optional[str] = Field(default=None, alias="runID") + run_id: Optional[str] = None """ Universally unique identifier (UUID) of a task run. """ - data: Optional[List[object]] = Field(default=None, alias="data") + data: Optional[List[object]] = None """ depending on the source type, the validation returns sampling data of your source (JSON, CSV, BigQuery). """ - events: Optional[List[Event]] = Field(default=None, alias="events") + events: Optional[List[Event]] = None """ in case of error, observability events will be added to the response, if any. """ - message: str = Field(alias="message") + message: str """ a message describing the outcome of a validate run. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class SourceWatchResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/streaming_input.py b/algoliasearch/ingestion/models/streaming_input.py index c2cf9a682..67b4cd6f1 100644 --- a/algoliasearch/ingestion/models/streaming_input.py +++ b/algoliasearch/ingestion/models/streaming_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.ingestion.models.mapping_input import MappingInput +_ALIASES = { + "mapping": "mapping", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class StreamingInput(BaseModel): """ Input for a `streaming` task whose source is of type `ga4BigqueryExport` and for which extracted data is continuously streamed. 
""" - mapping: MappingInput = Field(alias="mapping") + mapping: MappingInput model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/streaming_trigger.py b/algoliasearch/ingestion/models/streaming_trigger.py index 8e133d83e..e5e343f34 100644 --- a/algoliasearch/ingestion/models/streaming_trigger.py +++ b/algoliasearch/ingestion/models/streaming_trigger.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.ingestion.models.streaming_trigger_type import StreamingTriggerType +_ALIASES = { + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class StreamingTrigger(BaseModel): """ Trigger input for continuously running tasks. 
""" - type: StreamingTriggerType = Field(alias="type") + type: StreamingTriggerType model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/subscription_trigger.py b/algoliasearch/ingestion/models/subscription_trigger.py index 210a743d9..d233ef410 100644 --- a/algoliasearch/ingestion/models/subscription_trigger.py +++ b/algoliasearch/ingestion/models/subscription_trigger.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,19 +22,28 @@ SubscriptionTriggerType, ) +_ALIASES = { + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SubscriptionTrigger(BaseModel): """ Trigger input for subscription tasks. 
""" - type: SubscriptionTriggerType = Field(alias="type") + type: SubscriptionTriggerType model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task.py b/algoliasearch/ingestion/models/task.py index c0efdfcd3..8eec7c330 100644 --- a/algoliasearch/ingestion/models/task.py +++ b/algoliasearch/ingestion/models/task.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,35 +21,55 @@ from algoliasearch.ingestion.models.action_type import ActionType from algoliasearch.ingestion.models.task_input import TaskInput +_ALIASES = { + "task_id": "taskID", + "source_id": "sourceID", + "destination_id": "destinationID", + "cron": "cron", + "last_run": "lastRun", + "next_run": "nextRun", + "input": "input", + "enabled": "enabled", + "failure_threshold": "failureThreshold", + "action": "action", + "cursor": "cursor", + "created_at": "createdAt", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Task(BaseModel): """ Task """ - task_id: str = Field(alias="taskID") + task_id: str """ Universally unique identifier (UUID) of a task. """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - cron: Optional[str] = Field(default=None, alias="cron") + cron: Optional[str] = None """ Cron expression for the task's schedule. 
""" - last_run: Optional[str] = Field(default=None, alias="lastRun") + last_run: Optional[str] = None """ The last time the scheduled task ran in RFC 3339 format. """ - next_run: Optional[str] = Field(default=None, alias="nextRun") + next_run: Optional[str] = None """ The next scheduled run of the task in RFC 3339 format. """ - input: Optional[TaskInput] = Field(default=None, alias="input") - enabled: bool = Field(alias="enabled") + input: Optional[TaskInput] = None + enabled: bool """ Whether the task is enabled. """ - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ - action: ActionType = Field(alias="action") - cursor: Optional[str] = Field(default=None, alias="cursor") + action: ActionType + cursor: Optional[str] = None """ Date of the last cursor in RFC 3339 format. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -57,6 +77,7 @@ class Task(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_create.py b/algoliasearch/ingestion/models/task_create.py index 3784cd903..be6683977 100644 --- a/algoliasearch/ingestion/models/task_create.py +++ b/algoliasearch/ingestion/models/task_create.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,25 +21,40 @@ from algoliasearch.ingestion.models.action_type import ActionType from algoliasearch.ingestion.models.task_input import TaskInput +_ALIASES = { + "source_id": "sourceID", + "destination_id": "destinationID", + "action": "action", + "cron": "cron", + "enabled": "enabled", + "failure_threshold": "failureThreshold", + "input": "input", + "cursor": "cursor", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TaskCreate(BaseModel): """ API request body for creating a task. """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - action: ActionType = Field(alias="action") - cron: Optional[str] = Field(default=None, alias="cron") + action: ActionType + cron: Optional[str] = None """ Cron expression for the task's schedule. """ - enabled: Optional[bool] = Field(default=None, alias="enabled") + enabled: Optional[bool] = None """ Whether the task is enabled. 
""" - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ - input: Optional[TaskInput] = Field(default=None, alias="input") - cursor: Optional[str] = Field(default=None, alias="cursor") + input: Optional[TaskInput] = None + cursor: Optional[str] = None """ Date of the last cursor in RFC 3339 format. """ model_config = ConfigDict( @@ -47,6 +62,7 @@ class TaskCreate(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_create_response.py b/algoliasearch/ingestion/models/task_create_response.py index 01752beca..ed45ce3e8 100644 --- a/algoliasearch/ingestion/models/task_create_response.py +++ b/algoliasearch/ingestion/models/task_create_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TaskCreateResponse(BaseModel): """ API response for creating a task. """ - task_id: str = Field(alias="taskID") + task_id: str """ Universally unique identifier (UUID) of a task. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TaskCreateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_create_trigger.py b/algoliasearch/ingestion/models/task_create_trigger.py index 2104cd227..eb10a67ec 100644 --- a/algoliasearch/ingestion/models/task_create_trigger.py +++ b/algoliasearch/ingestion/models/task_create_trigger.py @@ -37,13 +37,12 @@ class TaskCreateTrigger(BaseModel): oneof_schema_4_validator: Optional[StreamingTrigger] = Field(default=None) - actual_instance: Optional[ - Union[ - OnDemandTriggerInput, - ScheduleTriggerInput, - StreamingTrigger, - SubscriptionTrigger, - ] + actual_instance: Union[ + OnDemandTriggerInput, + ScheduleTriggerInput, + StreamingTrigger, + SubscriptionTrigger, + None, ] = None one_of_schemas: Set[str] = { "OnDemandTriggerInput", @@ -62,20 +61,20 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - OnDemandTriggerInput, - ScheduleTriggerInput, - StreamingTrigger, - SubscriptionTrigger, - ] + ) -> Union[ + OnDemandTriggerInput, + ScheduleTriggerInput, + StreamingTrigger, + SubscriptionTrigger, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -129,9 +128,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -151,8 +150,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/task_create_v1.py b/algoliasearch/ingestion/models/task_create_v1.py index d6dfed268..1796c80a4 100644 --- a/algoliasearch/ingestion/models/task_create_v1.py +++ b/algoliasearch/ingestion/models/task_create_v1.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,24 +22,39 @@ from algoliasearch.ingestion.models.task_create_trigger import TaskCreateTrigger from algoliasearch.ingestion.models.task_input import TaskInput +_ALIASES = { + "source_id": "sourceID", + "destination_id": "destinationID", + "trigger": "trigger", + "action": "action", + "enabled": "enabled", + "failure_threshold": "failureThreshold", + "input": "input", + "cursor": "cursor", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TaskCreateV1(BaseModel): """ API request body for creating a task using the V1 shape, please use methods and types that don't contain the V1 suffix. """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. 
""" - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. """ - trigger: TaskCreateTrigger = Field(alias="trigger") - action: ActionType = Field(alias="action") - enabled: Optional[bool] = Field(default=None, alias="enabled") + trigger: TaskCreateTrigger + action: ActionType + enabled: Optional[bool] = None """ Whether the task is enabled. """ - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ - input: Optional[TaskInput] = Field(default=None, alias="input") - cursor: Optional[str] = Field(default=None, alias="cursor") + input: Optional[TaskInput] = None + cursor: Optional[str] = None """ Date of the last cursor in RFC 3339 format. """ model_config = ConfigDict( @@ -47,6 +62,7 @@ class TaskCreateV1(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_input.py b/algoliasearch/ingestion/models/task_input.py index 3f0b39aff..8830472f1 100644 --- a/algoliasearch/ingestion/models/task_input.py +++ b/algoliasearch/ingestion/models/task_input.py @@ -34,9 +34,9 @@ class TaskInput(BaseModel): oneof_schema_3_validator: Optional[ShopifyInput] = Field(default=None) - actual_instance: Optional[ - Union[DockerStreamsInput, ShopifyInput, StreamingInput] - ] = None + actual_instance: Union[DockerStreamsInput, ShopifyInput, StreamingInput, None] = ( + None + ) one_of_schemas: Set[str] = {"DockerStreamsInput", "ShopifyInput", "StreamingInput"} def __init__(self, *args, **kwargs) -> None: @@ -49,14 +49,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[DockerStreamsInput, ShopifyInput, StreamingInput]]: + ) -> Union[DockerStreamsInput, ShopifyInput, StreamingInput, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -103,9 +103,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -119,8 +119,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/task_search.py b/algoliasearch/ingestion/models/task_search.py index 9eca11dc6..d264897c6 100644 --- a/algoliasearch/ingestion/models/task_search.py +++ b/algoliasearch/ingestion/models/task_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "task_ids": "taskIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TaskSearch(BaseModel): """ TaskSearch """ - task_ids: List[str] = Field(alias="taskIDs") + task_ids: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, 
validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_update.py b/algoliasearch/ingestion/models/task_update.py index 91f219ce2..b81b6f93f 100644 --- a/algoliasearch/ingestion/models/task_update.py +++ b/algoliasearch/ingestion/models/task_update.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,20 +20,32 @@ from algoliasearch.ingestion.models.task_input import TaskInput +_ALIASES = { + "destination_id": "destinationID", + "cron": "cron", + "input": "input", + "enabled": "enabled", + "failure_threshold": "failureThreshold", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TaskUpdate(BaseModel): """ API request body for updating a task. """ - destination_id: Optional[str] = Field(default=None, alias="destinationID") + destination_id: Optional[str] = None """ Universally unique identifier (UUID) of a destination resource. """ - cron: Optional[str] = Field(default=None, alias="cron") + cron: Optional[str] = None """ Cron expression for the task's schedule. """ - input: Optional[TaskInput] = Field(default=None, alias="input") - enabled: Optional[bool] = Field(default=None, alias="enabled") + input: Optional[TaskInput] = None + enabled: Optional[bool] = None """ Whether the task is enabled. """ - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. 
""" model_config = ConfigDict( @@ -41,6 +53,7 @@ class TaskUpdate(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_update_response.py b/algoliasearch/ingestion/models/task_update_response.py index c3f32e70f..acd62e09f 100644 --- a/algoliasearch/ingestion/models/task_update_response.py +++ b/algoliasearch/ingestion/models/task_update_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TaskUpdateResponse(BaseModel): """ API response for updating a task. """ - task_id: str = Field(alias="taskID") + task_id: str """ Universally unique identifier (UUID) of a task. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TaskUpdateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_update_v1.py b/algoliasearch/ingestion/models/task_update_v1.py index 791d0788f..23cf427d2 100644 --- a/algoliasearch/ingestion/models/task_update_v1.py +++ b/algoliasearch/ingestion/models/task_update_v1.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,19 +21,31 @@ from algoliasearch.ingestion.models.task_input import TaskInput from algoliasearch.ingestion.models.trigger_update_input import TriggerUpdateInput +_ALIASES = { + "destination_id": "destinationID", + "trigger": "trigger", + "input": "input", + "enabled": "enabled", + "failure_threshold": "failureThreshold", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TaskUpdateV1(BaseModel): """ API request body for updating a task using the V1 shape, please use methods and types that don't contain the V1 suffix. """ - destination_id: Optional[str] = Field(default=None, alias="destinationID") + destination_id: Optional[str] = None """ Universally unique identifier (UUID) of a destination resource. """ - trigger: Optional[TriggerUpdateInput] = Field(default=None, alias="trigger") - input: Optional[TaskInput] = Field(default=None, alias="input") - enabled: Optional[bool] = Field(default=None, alias="enabled") + trigger: Optional[TriggerUpdateInput] = None + input: Optional[TaskInput] = None + enabled: Optional[bool] = None """ Whether the task is enabled. 
""" - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ model_config = ConfigDict( @@ -41,6 +53,7 @@ class TaskUpdateV1(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/task_v1.py b/algoliasearch/ingestion/models/task_v1.py index a0fa899a4..c9030e09c 100644 --- a/algoliasearch/ingestion/models/task_v1.py +++ b/algoliasearch/ingestion/models/task_v1.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,30 +22,48 @@ from algoliasearch.ingestion.models.task_input import TaskInput from algoliasearch.ingestion.models.trigger import Trigger +_ALIASES = { + "task_id": "taskID", + "source_id": "sourceID", + "destination_id": "destinationID", + "trigger": "trigger", + "input": "input", + "enabled": "enabled", + "failure_threshold": "failureThreshold", + "action": "action", + "cursor": "cursor", + "created_at": "createdAt", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TaskV1(BaseModel): """ The V1 task object, please use methods and types that don't contain the V1 suffix. """ - task_id: str = Field(alias="taskID") + task_id: str """ Universally unique identifier (UUID) of a task. """ - source_id: str = Field(alias="sourceID") + source_id: str """ Universally uniqud identifier (UUID) of a source. """ - destination_id: str = Field(alias="destinationID") + destination_id: str """ Universally unique identifier (UUID) of a destination resource. 
""" - trigger: Trigger = Field(alias="trigger") - input: Optional[TaskInput] = Field(default=None, alias="input") - enabled: bool = Field(alias="enabled") + trigger: Trigger + input: Optional[TaskInput] = None + enabled: bool """ Whether the task is enabled. """ - failure_threshold: Optional[int] = Field(default=None, alias="failureThreshold") + failure_threshold: Optional[int] = None """ Maximum accepted percentage of failures for a task run to finish successfully. """ - action: ActionType = Field(alias="action") - cursor: Optional[str] = Field(default=None, alias="cursor") + action: ActionType + cursor: Optional[str] = None """ Date of the last cursor in RFC 3339 format. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. """ model_config = ConfigDict( @@ -53,6 +71,7 @@ class TaskV1(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation.py b/algoliasearch/ingestion/models/transformation.py index 27ad236ec..7209edb13 100644 --- a/algoliasearch/ingestion/models/transformation.py +++ b/algoliasearch/ingestion/models/transformation.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,26 +18,39 @@ from typing_extensions import Self +_ALIASES = { + "transformation_id": "transformationID", + "authentication_ids": "authenticationIDs", + "code": "code", + "name": "name", + "description": "description", + "created_at": "createdAt", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return 
_ALIASES.get(name, name) + + class Transformation(BaseModel): """ Transformation """ - transformation_id: str = Field(alias="transformationID") + transformation_id: str """ Universally unique identifier (UUID) of a transformation. """ - authentication_ids: Optional[List[str]] = Field( - default=None, alias="authenticationIDs" - ) + authentication_ids: Optional[List[str]] = None """ The authentications associated for the current transformation. """ - code: str = Field(alias="code") + code: str """ The source code of the transformation. """ - name: str = Field(alias="name") + name: str """ The uniquely identified name of your transformation. """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ A descriptive name for your transformation of what it does. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -45,6 +58,7 @@ class Transformation(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_create.py b/algoliasearch/ingestion/models/transformation_create.py index 0fe05e679..aeb86f6da 100644 --- a/algoliasearch/ingestion/models/transformation_create.py +++ b/algoliasearch/ingestion/models/transformation_create.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,20 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "code": "code", + "name": "name", + "description": "description", + "authentication_ids": "authenticationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TransformationCreate(BaseModel): """ API request body for creating a transformation. """ - code: str = Field(alias="code") + code: str """ The source code of the transformation. """ - name: str = Field(alias="name") + name: str """ The uniquely identified name of your transformation. """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ A descriptive name for your transformation of what it does. """ - authentication_ids: Optional[List[str]] = Field( - default=None, alias="authenticationIDs" - ) + authentication_ids: Optional[List[str]] = None """ The authentications associated for the current transformation. 
""" model_config = ConfigDict( @@ -39,6 +49,7 @@ class TransformationCreate(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_create_response.py b/algoliasearch/ingestion/models/transformation_create_response.py index 8fa2fc633..70701b6d5 100644 --- a/algoliasearch/ingestion/models/transformation_create_response.py +++ b/algoliasearch/ingestion/models/transformation_create_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "transformation_id": "transformationID", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TransformationCreateResponse(BaseModel): """ API response for creating a transformation. """ - transformation_id: str = Field(alias="transformationID") + transformation_id: str """ Universally unique identifier (UUID) of a transformation. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date of creation in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TransformationCreateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_error.py b/algoliasearch/ingestion/models/transformation_error.py index bf54942a0..b5f0fb8b8 100644 --- a/algoliasearch/ingestion/models/transformation_error.py +++ b/algoliasearch/ingestion/models/transformation_error.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "code": "code", + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TransformationError(BaseModel): """ The error if the transformation failed. """ - code: Optional[int] = Field(default=None, alias="code") + code: Optional[int] = None """ The error status code. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ A descriptive message explaining the failure. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TransformationError(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_search.py b/algoliasearch/ingestion/models/transformation_search.py index d9b98c921..28980edc2 100644 --- a/algoliasearch/ingestion/models/transformation_search.py +++ b/algoliasearch/ingestion/models/transformation_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "transformation_ids": "transformationIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TransformationSearch(BaseModel): """ TransformationSearch """ - transformation_ids: List[str] = Field(alias="transformationIDs") + transformation_ids: List[str] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_try.py b/algoliasearch/ingestion/models/transformation_try.py index b616077d3..644ea2ad1 100644 --- a/algoliasearch/ingestion/models/transformation_try.py +++ b/algoliasearch/ingestion/models/transformation_try.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,25 +20,34 @@ from algoliasearch.ingestion.models.authentication_create import AuthenticationCreate +_ALIASES = { + "code": "code", + "sample_record": "sampleRecord", + 
"authentications": "authentications", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TransformationTry(BaseModel): """ TransformationTry """ - code: str = Field(alias="code") + code: str """ The source code of the transformation. """ - sample_record: object = Field(alias="sampleRecord") + sample_record: object """ The record to apply the given code to. """ - authentications: Optional[List[AuthenticationCreate]] = Field( - default=None, alias="authentications" - ) + authentications: Optional[List[AuthenticationCreate]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_try_response.py b/algoliasearch/ingestion/models/transformation_try_response.py index 8c32bfaea..9ae441252 100644 --- a/algoliasearch/ingestion/models/transformation_try_response.py +++ b/algoliasearch/ingestion/models/transformation_try_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.ingestion.models.transformation_error import TransformationError +_ALIASES = { + "payloads": "payloads", + "error": "error", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TransformationTryResponse(BaseModel): """ TransformationTryResponse """ - payloads: List[object] = Field(alias="payloads") + payloads: List[object] """ The array of records returned by the transformation service. 
""" - error: Optional[TransformationError] = Field(default=None, alias="error") + error: Optional[TransformationError] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/transformation_update_response.py b/algoliasearch/ingestion/models/transformation_update_response.py index 7820ef432..cce4713fa 100644 --- a/algoliasearch/ingestion/models/transformation_update_response.py +++ b/algoliasearch/ingestion/models/transformation_update_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "transformation_id": "transformationID", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TransformationUpdateResponse(BaseModel): """ API response for updating a transformation. """ - transformation_id: str = Field(alias="transformationID") + transformation_id: str """ Universally unique identifier (UUID) of a transformation. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date of last update in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TransformationUpdateResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/trigger.py b/algoliasearch/ingestion/models/trigger.py index 1875833eb..5b57c9542 100644 --- a/algoliasearch/ingestion/models/trigger.py +++ b/algoliasearch/ingestion/models/trigger.py @@ -37,8 +37,8 @@ class Trigger(BaseModel): oneof_schema_4_validator: Optional[StreamingTrigger] = Field(default=None) - actual_instance: Optional[ - Union[OnDemandTrigger, ScheduleTrigger, StreamingTrigger, SubscriptionTrigger] + actual_instance: Union[ + OnDemandTrigger, ScheduleTrigger, StreamingTrigger, SubscriptionTrigger, None ] = None one_of_schemas: Set[str] = { "OnDemandTrigger", @@ -57,15 +57,20 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[OnDemandTrigger, ScheduleTrigger, StreamingTrigger, SubscriptionTrigger] + ) -> Union[ + OnDemandTrigger, + ScheduleTrigger, + StreamingTrigger, + SubscriptionTrigger, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -119,9 +124,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -141,8 +146,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/ingestion/models/trigger_update_input.py b/algoliasearch/ingestion/models/trigger_update_input.py index 198284555..7ccd68bbc 100644 --- a/algoliasearch/ingestion/models/trigger_update_input.py +++ b/algoliasearch/ingestion/models/trigger_update_input.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "cron": "cron", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TriggerUpdateInput(BaseModel): """ Trigger for a task update. """ - cron: str = Field(alias="cron") + cron: str """ Cron expression for the task's schedule. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class TriggerUpdateInput(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/ingestion/models/window.py b/algoliasearch/ingestion/models/window.py index 619adf2e5..4d316d0ca 100644 --- a/algoliasearch/ingestion/models/window.py +++ b/algoliasearch/ingestion/models/window.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "start_date": "startDate", + "end_date": "endDate", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Window(BaseModel): """ Time window by which to filter the observability data. """ - start_date: str = Field(alias="startDate") + start_date: str """ Date in RFC 3339 format representing the oldest data in the time window. """ - end_date: str = Field(alias="endDate") + end_date: str """ Date in RFC 3339 format representing the newest data in the time window. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class Window(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/client.py b/algoliasearch/insights/client.py index e8490d49f..b32da7720 100644 --- a/algoliasearch/insights/client.py +++ b/algoliasearch/insights/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from urllib.parse import quote from pydantic import Field, StrictStr @@ -20,8 +20,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -49,7 +50,7 @@ class InsightsClient: """ _transporter: Transporter - _config: InsightsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -61,7 +62,9 @@ def __init__( config: Optional[InsightsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = InsightsConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = InsightsConfig(app_id, api_key, region) @@ -113,7 +116,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def custom_delete_with_http_info( self, @@ -146,11 +149,11 @@ async def 
custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -221,11 +224,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -300,11 +303,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -315,7 +318,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -392,11 +395,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = 
_qpvalue _data = {} if body is not None: @@ -407,7 +410,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -514,7 +517,7 @@ async def delete_user_token( async def push_events_with_http_info( self, - insights_events: InsightsEvents, + insights_events: Union[InsightsEvents, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -540,7 +543,7 @@ async def push_events_with_http_info( verb=Verb.POST, path="/1/events", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -548,7 +551,7 @@ async def push_events_with_http_info( async def push_events( self, - insights_events: InsightsEvents, + insights_events: Union[InsightsEvents, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> EventsResponse: """ @@ -583,7 +586,7 @@ class InsightsClientSync: """ _transporter: TransporterSync - _config: InsightsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -595,7 +598,9 @@ def __init__( config: Optional[InsightsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = InsightsConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = InsightsConfig(app_id, api_key, region) @@ -646,7 +651,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def custom_delete_with_http_info( self, @@ -679,11 
+684,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -752,11 +757,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -831,11 +836,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -846,7 +851,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -921,11 +926,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if 
body is not None: @@ -936,7 +941,7 @@ def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1041,7 +1046,7 @@ def delete_user_token( def push_events_with_http_info( self, - insights_events: InsightsEvents, + insights_events: Union[InsightsEvents, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1067,7 +1072,7 @@ def push_events_with_http_info( verb=Verb.POST, path="/1/events", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1075,7 +1080,7 @@ def push_events_with_http_info( def push_events( self, - insights_events: InsightsEvents, + insights_events: Union[InsightsEvents, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> EventsResponse: """ diff --git a/algoliasearch/insights/config.py b/algoliasearch/insights/config.py index 6c4ef6987..fc430ce97 100644 --- a/algoliasearch/insights/config.py +++ b/algoliasearch/insights/config.py @@ -7,11 +7,19 @@ class InsightsConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> None: + def __init__( + self, + app_id: Optional[str], + api_key: Optional[str], + region: Optional[str] = None, + ) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Insights") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." 
+ self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -42,7 +50,9 @@ def __init__(self, app_id: str, api_key: str, region: Optional[str] = None) -> N Host( "insights.algolia.io" if region is None - else "insights.{region}.algolia.io".replace("{region}", region) + else "insights.{region}.algolia.io".replace( + "{region}", region or "" + ) ) ] ) diff --git a/algoliasearch/insights/models/added_to_cart_object_ids.py b/algoliasearch/insights/models/added_to_cart_object_ids.py index e71064f8a..3070681b9 100644 --- a/algoliasearch/insights/models/added_to_cart_object_ids.py +++ b/algoliasearch/insights/models/added_to_cart_object_ids.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -24,33 +24,49 @@ from algoliasearch.insights.models.object_data import ObjectData from algoliasearch.insights.models.value import Value +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "event_subtype": "eventSubtype", + "index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "currency": "currency", + "object_data": "objectData", + "timestamp": "timestamp", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class AddedToCartObjectIDs(BaseModel): """ Use this event to track when users add items to their shopping cart unrelated to a previous Algolia request. For example, if you don't use Algolia to build your category pages, use this event. To track add-to-cart events related to Algolia requests, use the \"Added to cart object IDs after search\" event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. 
Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ConversionEvent = Field(alias="eventType") - event_subtype: AddToCartEvent = Field(alias="eventSubtype") - index: str = Field(alias="index") + event_type: ConversionEvent + event_subtype: AddToCartEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Three-letter [currency code](https://www.iso.org/iso-4217-currency-codes.html). """ - object_data: Optional[List[ObjectData]] = Field(default=None, alias="objectData") + object_data: Optional[List[ObjectData]] = None """ Extra information about the records involved in a purchase or add-to-cart event. 
If specified, it must have the same length as `objectIDs`. """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. """ - value: Optional[Value] = Field(default=None, alias="value") + value: Optional[Value] = None @field_validator("event_name") def event_name_validate_regular_expression(cls, value): @@ -87,6 +103,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/added_to_cart_object_ids_after_search.py b/algoliasearch/insights/models/added_to_cart_object_ids_after_search.py index 6cd5d64dd..039f0affb 100644 --- a/algoliasearch/insights/models/added_to_cart_object_ids_after_search.py +++ b/algoliasearch/insights/models/added_to_cart_object_ids_after_search.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -24,37 +24,52 @@ from algoliasearch.insights.models.object_data_after_search import ObjectDataAfterSearch from algoliasearch.insights.models.value import Value +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "event_subtype": "eventSubtype", + "index": "index", + "query_id": "queryID", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "currency": "currency", + "object_data": "objectData", + "timestamp": "timestamp", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class 
AddedToCartObjectIDsAfterSearch(BaseModel): """ Use this event to track when users add items to their shopping cart after a previous Algolia request. If you're building your category pages with Algolia, you'll also use this event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ConversionEvent = Field(alias="eventType") - event_subtype: AddToCartEvent = Field(alias="eventSubtype") - index: str = Field(alias="index") + event_type: ConversionEvent + event_subtype: AddToCartEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - query_id: str = Field(alias="queryID") + query_id: str """ Unique identifier for a search query. The query ID is required for events related to search or browse requests. If you add `clickAnalytics: true` as a search request parameter, the query ID is included in the API response. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. 
This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Three-letter [currency code](https://www.iso.org/iso-4217-currency-codes.html). """ - object_data: Optional[List[ObjectDataAfterSearch]] = Field( - default=None, alias="objectData" - ) + object_data: Optional[List[ObjectDataAfterSearch]] = None """ Extra information about the records involved in a purchase or add-to-cart events. If provided, it must be the same length as `objectIDs`. """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" - value: Optional[Value] = Field(default=None, alias="value") + value: Optional[Value] = None @field_validator("event_name") def event_name_validate_regular_expression(cls, value): @@ -98,6 +113,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/clicked_filters.py b/algoliasearch/insights/models/clicked_filters.py index b248cba93..43703ce76 100644 --- a/algoliasearch/insights/models/clicked_filters.py +++ b/algoliasearch/insights/models/clicked_filters.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.click_event import ClickEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "filters": "filters", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ClickedFilters(BaseModel): """ Use this event to track when users click facet filters in your user interface. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ClickEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ClickEvent + index: str """ Index name (case-sensitive) to which the event's items belong. 
""" - filters: List[str] = Field(alias="filters") + filters: List[str] """ Applied facet filters. Facet filters are `facet:value` pairs. Facet values must be URL-encoded, such as, `discount:10%25`. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/clicked_object_ids.py b/algoliasearch/insights/models/clicked_object_ids.py index de1e332e2..6177fed49 100644 --- a/algoliasearch/insights/models/clicked_object_ids.py +++ b/algoliasearch/insights/models/clicked_object_ids.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.click_event import ClickEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ClickedObjectIDs(BaseModel): """ Use this event to track when users click items unrelated to a previous Algolia request. For example, if you don't use Algolia to build your category pages, use this event. To track click events related to Algolia requests, use the \"Clicked object IDs after search\" event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. 
""" - event_type: ClickEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ClickEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/clicked_object_ids_after_search.py b/algoliasearch/insights/models/clicked_object_ids_after_search.py index 6b6b0c6ed..d4ab577e8 100644 --- a/algoliasearch/insights/models/clicked_object_ids_after_search.py +++ b/algoliasearch/insights/models/clicked_object_ids_after_search.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,30 +21,44 @@ from algoliasearch.insights.models.click_event import ClickEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "object_ids": "objectIDs", + "positions": "positions", + "query_id": "queryID", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ClickedObjectIDsAfterSearch(BaseModel): """ Click event after an Algolia request. Use this event to track when users click items in the search results. If you're building your category pages with Algolia, you'll also use this event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. 
""" - event_type: ClickEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ClickEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - positions: List[int] = Field(alias="positions") + positions: List[int] """ Position of the clicked item the search results. You must provide 1 `position` for each `objectID`. """ - query_id: str = Field(alias="queryID") + query_id: str """ Unique identifier for a search query. The query ID is required for events related to search or browse requests. If you add `clickAnalytics: true` as a search request parameter, the query ID is included in the API response. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -89,6 +103,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/converted_filters.py b/algoliasearch/insights/models/converted_filters.py index c90f13687..05abf8fea 100644 --- a/algoliasearch/insights/models/converted_filters.py +++ b/algoliasearch/insights/models/converted_filters.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.conversion_event import ConversionEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "filters": "filters", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConvertedFilters(BaseModel): """ ConvertedFilters """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ConversionEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ConversionEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - filters: List[str] = Field(alias="filters") + filters: List[str] """ Applied facet filters. Facet filters are `facet:value` pairs. Facet values must be URL-encoded, such as, `discount:10%25`. 
""" - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/converted_object_ids.py b/algoliasearch/insights/models/converted_object_ids.py index 19aa55a88..d9e99bf30 100644 --- a/algoliasearch/insights/models/converted_object_ids.py +++ b/algoliasearch/insights/models/converted_object_ids.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.conversion_event import ConversionEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConvertedObjectIDs(BaseModel): """ Use this event to track when users convert on items unrelated to a previous Algolia request. For example, if you don't use Algolia to build your category pages, use this event. To track conversion events related to Algolia requests, use the \"Converted object IDs after search\" event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. 
""" - event_type: ConversionEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ConversionEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/converted_object_ids_after_search.py b/algoliasearch/insights/models/converted_object_ids_after_search.py index 7a9805803..c210181ac 100644 --- a/algoliasearch/insights/models/converted_object_ids_after_search.py +++ b/algoliasearch/insights/models/converted_object_ids_after_search.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,28 +21,41 @@ from algoliasearch.insights.models.conversion_event import ConversionEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "object_ids": "objectIDs", + "query_id": "queryID", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConvertedObjectIDsAfterSearch(BaseModel): """ Use this event to track when users convert after a previous Algolia request. For example, a user clicks on an item in the search results to view the product detail page. Then, the user adds the item to their shopping cart. If you're building your category pages with Algolia, you'll also use this event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. 
""" - event_type: ConversionEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ConversionEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - query_id: str = Field(alias="queryID") + query_id: str """ Unique identifier for a search query. The query ID is required for events related to search or browse requests. If you add `clickAnalytics: true` as a search request parameter, the query ID is included in the API response. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -87,6 +100,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/discount.py b/algoliasearch/insights/models/discount.py index cefe926e5..2dbacfead 100644 --- a/algoliasearch/insights/models/discount.py +++ b/algoliasearch/insights/models/discount.py @@ -27,7 +27,7 @@ class Discount(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[float, str]] = None + actual_instance: Union[float, str, None] = None one_of_schemas: Set[str] = {"float", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[float, str]]: + def unwrap_actual_instance(self) -> Union[float, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], float, str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/insights/models/error_base.py b/algoliasearch/insights/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/insights/models/error_base.py +++ b/algoliasearch/insights/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. 
""" - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/insights/models/events_items.py b/algoliasearch/insights/models/events_items.py index b5b47be2f..b589a4d15 100644 --- a/algoliasearch/insights/models/events_items.py +++ b/algoliasearch/insights/models/events_items.py @@ -77,21 +77,20 @@ class EventsItems(BaseModel): oneof_schema_12_validator: Optional[ViewedFilters] = Field(default=None) - actual_instance: Optional[ - Union[ - AddedToCartObjectIDs, - AddedToCartObjectIDsAfterSearch, - ClickedFilters, - ClickedObjectIDs, - ClickedObjectIDsAfterSearch, - ConvertedFilters, - ConvertedObjectIDs, - ConvertedObjectIDsAfterSearch, - PurchasedObjectIDs, - PurchasedObjectIDsAfterSearch, - ViewedFilters, - ViewedObjectIDs, - ] + actual_instance: Union[ + AddedToCartObjectIDs, + AddedToCartObjectIDsAfterSearch, + ClickedFilters, + ClickedObjectIDs, + ClickedObjectIDsAfterSearch, + ConvertedFilters, + ConvertedObjectIDs, + ConvertedObjectIDsAfterSearch, + PurchasedObjectIDs, + PurchasedObjectIDsAfterSearch, + ViewedFilters, + ViewedObjectIDs, + None, ] = None one_of_schemas: Set[str] = { "AddedToCartObjectIDs", @@ -118,28 +117,28 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - AddedToCartObjectIDs, - AddedToCartObjectIDsAfterSearch, - ClickedFilters, - ClickedObjectIDs, - ClickedObjectIDsAfterSearch, - ConvertedFilters, - ConvertedObjectIDs, - ConvertedObjectIDsAfterSearch, - PurchasedObjectIDs, - PurchasedObjectIDsAfterSearch, - ViewedFilters, - ViewedObjectIDs, - ] + ) -> Union[ + AddedToCartObjectIDs, + AddedToCartObjectIDsAfterSearch, + ClickedFilters, + ClickedObjectIDs, + ClickedObjectIDsAfterSearch, + ConvertedFilters, + ConvertedObjectIDs, + ConvertedObjectIDsAfterSearch, + PurchasedObjectIDs, + PurchasedObjectIDsAfterSearch, + ViewedFilters, + ViewedObjectIDs, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. @@ -243,9 +242,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -273,8 +272,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/insights/models/events_response.py b/algoliasearch/insights/models/events_response.py index 0ac3d4d0e..cff183dc9 100644 --- a/algoliasearch/insights/models/events_response.py +++ b/algoliasearch/insights/models/events_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, 
ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", + "status": "status", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class EventsResponse(BaseModel): """ The response of the Insights API. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ Details about the response, such as error messages. """ - status: Optional[int] = Field(default=None, alias="status") + status: Optional[int] = None """ The HTTP status code of the response. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class EventsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/insights_events.py b/algoliasearch/insights/models/insights_events.py index a071f4f9d..c67b293bd 100644 --- a/algoliasearch/insights/models/insights_events.py +++ b/algoliasearch/insights/models/insights_events.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.insights.models.events_items import EventsItems +_ALIASES = { + "events": "events", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class InsightsEvents(BaseModel): """ InsightsEvents """ - events: List[EventsItems] = Field(alias="events") + events: List[EventsItems] """ Click and conversion events. **All** events must be valid, otherwise the API returns an error. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class InsightsEvents(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/object_data.py b/algoliasearch/insights/models/object_data.py index df0ab0ef8..aafa8ccb2 100644 --- a/algoliasearch/insights/models/object_data.py +++ b/algoliasearch/insights/models/object_data.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,22 +21,33 @@ from algoliasearch.insights.models.discount import Discount from algoliasearch.insights.models.price import Price +_ALIASES = { + "price": "price", + "quantity": "quantity", + "discount": "discount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ObjectData(BaseModel): """ ObjectData """ - price: Optional[Price] = Field(default=None, alias="price") - quantity: Optional[int] = Field(default=None, alias="quantity") + price: Optional[Price] = None + quantity: Optional[int] = None """ Quantity of a product that has been purchased or added to the cart. The total purchase value is the sum of `quantity` multiplied with the `price` for each purchased item. 
""" - discount: Optional[Discount] = Field(default=None, alias="discount") + discount: Optional[Discount] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/object_data_after_search.py b/algoliasearch/insights/models/object_data_after_search.py index b6356a8c3..a6e4090da 100644 --- a/algoliasearch/insights/models/object_data_after_search.py +++ b/algoliasearch/insights/models/object_data_after_search.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -22,18 +22,29 @@ from algoliasearch.insights.models.discount import Discount from algoliasearch.insights.models.price import Price +_ALIASES = { + "query_id": "queryID", + "price": "price", + "quantity": "quantity", + "discount": "discount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ObjectDataAfterSearch(BaseModel): """ ObjectDataAfterSearch """ - query_id: Optional[str] = Field(default=None, alias="queryID") + query_id: Optional[str] = None """ Unique identifier for a search query, used to track purchase events with multiple records that originate from different searches. """ - price: Optional[Price] = Field(default=None, alias="price") - quantity: Optional[int] = Field(default=None, alias="quantity") + price: Optional[Price] = None + quantity: Optional[int] = None """ Quantity of a product that has been purchased or added to the cart. The total purchase value is the sum of `quantity` multiplied with the `price` for each purchased item. 
""" - discount: Optional[Discount] = Field(default=None, alias="discount") + discount: Optional[Discount] = None @field_validator("query_id") def query_id_validate_regular_expression(cls, value): @@ -50,6 +61,7 @@ def query_id_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/price.py b/algoliasearch/insights/models/price.py index 8b477a7c3..3dcdd4458 100644 --- a/algoliasearch/insights/models/price.py +++ b/algoliasearch/insights/models/price.py @@ -27,7 +27,7 @@ class Price(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[float, str]] = None + actual_instance: Union[float, str, None] = None one_of_schemas: Set[str] = {"float", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[float, str]]: + def unwrap_actual_instance(self) -> Union[float, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], float, str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/insights/models/purchased_object_ids.py b/algoliasearch/insights/models/purchased_object_ids.py index f8678faaa..7d3baffcf 100644 --- a/algoliasearch/insights/models/purchased_object_ids.py +++ b/algoliasearch/insights/models/purchased_object_ids.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -24,33 +24,49 @@ from algoliasearch.insights.models.purchase_event import PurchaseEvent from algoliasearch.insights.models.value import Value +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "event_subtype": "eventSubtype", + "index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "currency": "currency", + "object_data": "objectData", + "timestamp": "timestamp", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class PurchasedObjectIDs(BaseModel): """ Use this event to track when users make a purchase unrelated to a 
previous Algolia request. For example, if you don't use Algolia to build your category pages, use this event. To track purchase events related to Algolia requests, use the \"Purchased object IDs after search\" event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ConversionEvent = Field(alias="eventType") - event_subtype: PurchaseEvent = Field(alias="eventSubtype") - index: str = Field(alias="index") + event_type: ConversionEvent + event_subtype: PurchaseEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). 
""" - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Three-letter [currency code](https://www.iso.org/iso-4217-currency-codes.html). """ - object_data: Optional[List[ObjectData]] = Field(default=None, alias="objectData") + object_data: Optional[List[ObjectData]] = None """ Extra information about the records involved in a purchase or add-to-cart event. If specified, it must have the same length as `objectIDs`. """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. """ - value: Optional[Value] = Field(default=None, alias="value") + value: Optional[Value] = None @field_validator("event_name") def event_name_validate_regular_expression(cls, value): @@ -87,6 +103,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/purchased_object_ids_after_search.py b/algoliasearch/insights/models/purchased_object_ids_after_search.py index bb1f1dfba..ebc54f9bc 100644 --- a/algoliasearch/insights/models/purchased_object_ids_after_search.py +++ b/algoliasearch/insights/models/purchased_object_ids_after_search.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -24,33 +24,49 @@ from algoliasearch.insights.models.purchase_event import PurchaseEvent from algoliasearch.insights.models.value import Value +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "event_subtype": "eventSubtype", + 
"index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "currency": "currency", + "object_data": "objectData", + "timestamp": "timestamp", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class PurchasedObjectIDsAfterSearch(BaseModel): """ Use this event to track when users make a purchase after a previous Algolia request. If you're building your category pages with Algolia, you'll also use this event. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ConversionEvent = Field(alias="eventType") - event_subtype: PurchaseEvent = Field(alias="eventSubtype") - index: str = Field(alias="index") + event_type: ConversionEvent + event_subtype: PurchaseEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. 
This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - currency: Optional[str] = Field(default=None, alias="currency") + currency: Optional[str] = None """ Three-letter [currency code](https://www.iso.org/iso-4217-currency-codes.html). """ - object_data: List[ObjectDataAfterSearch] = Field(alias="objectData") + object_data: List[ObjectDataAfterSearch] """ Extra information about the records involved in a purchase or add-to-cart events. If provided, it must be the same length as `objectIDs`. """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" - value: Optional[Value] = Field(default=None, alias="value") + value: Optional[Value] = None @field_validator("event_name") def event_name_validate_regular_expression(cls, value): @@ -87,6 +103,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/value.py b/algoliasearch/insights/models/value.py index 6d8307184..fe6549688 100644 --- a/algoliasearch/insights/models/value.py +++ b/algoliasearch/insights/models/value.py @@ -27,7 +27,7 @@ class Value(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[float, str]] = None + actual_instance: Union[float, str, None] = None one_of_schemas: Set[str] = {"float", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[float, str]]: + def unwrap_actual_instance(self) -> Union[float, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], float, str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/insights/models/viewed_filters.py b/algoliasearch/insights/models/viewed_filters.py index 7ba9156c0..6be4e2f3c 100644 --- a/algoliasearch/insights/models/viewed_filters.py +++ b/algoliasearch/insights/models/viewed_filters.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.view_event import ViewEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "filters": "filters", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ViewedFilters(BaseModel): """ Use this method to capture active filters. For example, when browsing a category page, users see content filtered on that specific category. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. 
Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ViewEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ViewEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - filters: List[str] = Field(alias="filters") + filters: List[str] """ Applied facet filters. Facet filters are `facet:value` pairs. Facet values must be URL-encoded, such as, `discount:10%25`. """ - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/insights/models/viewed_object_ids.py b/algoliasearch/insights/models/viewed_object_ids.py index 2fbda952e..d899c1e0d 100644 --- a/algoliasearch/insights/models/viewed_object_ids.py +++ b/algoliasearch/insights/models/viewed_object_ids.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,26 +21,38 @@ from algoliasearch.insights.models.view_event import ViewEvent +_ALIASES = { + "event_name": "eventName", + "event_type": "eventType", + "index": "index", + "object_ids": "objectIDs", + "user_token": "userToken", + "authenticated_user_token": "authenticatedUserToken", + "timestamp": "timestamp", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ViewedObjectIDs(BaseModel): """ Use this event to track when users viewed items in the search results. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name, up to 64 ASCII characters. Consider naming events consistently—for example, by adopting Segment's [object-action](https://segment.com/academy/collecting-data/naming-conventions-for-clean-data/#the-object-action-framework) framework. """ - event_type: ViewEvent = Field(alias="eventType") - index: str = Field(alias="index") + event_type: ViewEvent + index: str """ Index name (case-sensitive) to which the event's items belong. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records that are part of the event. 
""" - user_token: str = Field(alias="userToken") + user_token: str """ Anonymous or pseudonymous user identifier. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - authenticated_user_token: Optional[str] = Field( - default=None, alias="authenticatedUserToken" - ) + authenticated_user_token: Optional[str] = None """ Identifier for authenticated users. When the user signs in, you can get an identifier from your system and send it as `authenticatedUserToken`. This lets you keep using the `userToken` from before the user signed in, while providing a reliable way to identify users across sessions. Don't use personally identifiable information in user tokens. For more information, see [User token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - timestamp: Optional[int] = Field(default=None, alias="timestamp") + timestamp: Optional[int] = None """ Timestamp of the event, measured in milliseconds since the Unix epoch. By default, the Insights API uses the time it receives an event as its timestamp. 
""" @field_validator("event_name") @@ -78,6 +90,7 @@ def authenticated_user_token_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/client.py b/algoliasearch/monitoring/client.py index 170ed46e0..087011311 100644 --- a/algoliasearch/monitoring/client.py +++ b/algoliasearch/monitoring/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from urllib.parse import quote from pydantic import Field, StrictStr @@ -20,8 +20,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -57,7 +58,7 @@ class MonitoringClient: """ _transporter: Transporter - _config: MonitoringConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -68,7 +69,9 @@ def __init__( config: Optional[MonitoringConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = MonitoringConfig( + transporter.config.app_id, transporter.config.api_key + ) if config is None: config = MonitoringConfig(app_id, api_key) @@ -119,7 +122,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def 
custom_delete_with_http_info( self, @@ -152,11 +155,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -227,11 +230,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -306,11 +309,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -321,7 +324,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -398,11 +401,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - 
_query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -413,7 +416,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -700,17 +703,23 @@ async def get_latency( async def get_metrics_with_http_info( self, - metric: Annotated[ - Metric, - Field( - description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " - ), + metric: Union[ + Annotated[ + Metric, + Field( + description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " + ), + ], + str, ], - period: Annotated[ - Period, - Field( - description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " - ), + period: Union[ + Annotated[ + Period, + Field( + description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. 
" + ), + ], + str, ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -749,17 +758,23 @@ async def get_metrics_with_http_info( async def get_metrics( self, - metric: Annotated[ - Metric, - Field( - description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " - ), + metric: Union[ + Annotated[ + Metric, + Field( + description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " + ), + ], + str, ], - period: Annotated[ - Period, - Field( - description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " - ), + period: Union[ + Annotated[ + Period, + Field( + description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. 
" + ), + ], + str, ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> InfrastructureResponse: @@ -915,7 +930,7 @@ class MonitoringClientSync: """ _transporter: TransporterSync - _config: MonitoringConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -926,7 +941,9 @@ def __init__( config: Optional[MonitoringConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = MonitoringConfig( + transporter.config.app_id, transporter.config.api_key + ) if config is None: config = MonitoringConfig(app_id, api_key) @@ -976,7 +993,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def custom_delete_with_http_info( self, @@ -1009,11 +1026,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -1082,11 +1099,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -1161,11 +1178,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1176,7 +1193,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1251,11 +1268,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1266,7 +1283,7 @@ def custom_put_with_http_info( 
path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1549,17 +1566,23 @@ def get_latency( def get_metrics_with_http_info( self, - metric: Annotated[ - Metric, - Field( - description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " - ), + metric: Union[ + Annotated[ + Metric, + Field( + description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " + ), + ], + str, ], - period: Annotated[ - Period, - Field( - description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " - ), + period: Union[ + Annotated[ + Period, + Field( + description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " + ), + ], + str, ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -1598,17 +1621,23 @@ def get_metrics_with_http_info( def get_metrics( self, - metric: Annotated[ - Metric, - Field( - description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. 
" - ), + metric: Union[ + Annotated[ + Metric, + Field( + description="Metric to report. For more information about the individual metrics, see the description of the API response. To include all metrics, use `*`. " + ), + ], + str, ], - period: Annotated[ - Period, - Field( - description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " - ), + period: Union[ + Annotated[ + Period, + Field( + description="Period over which to aggregate the metrics: - `minute`. Aggregate the last minute. 1 data point per 10 seconds. - `hour`. Aggregate the last hour. 1 data point per minute. - `day`. Aggregate the last day. 1 data point per 10 minutes. - `week`. Aggregate the last week. 1 data point per hour. - `month`. Aggregate the last month. 1 data point per day. " + ), + ], + str, ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> InfrastructureResponse: diff --git a/algoliasearch/monitoring/config.py b/algoliasearch/monitoring/config.py index a3e870715..9bab7198d 100644 --- a/algoliasearch/monitoring/config.py +++ b/algoliasearch/monitoring/config.py @@ -1,4 +1,5 @@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import Host, HostsCollection @@ -6,11 +7,14 @@ class MonitoringConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str) -> None: + def __init__(self, app_id: Optional[str], api_key: Optional[str]) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Monitoring") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." 
+ self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, diff --git a/algoliasearch/monitoring/models/error_base.py b/algoliasearch/monitoring/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/monitoring/models/error_base.py +++ b/algoliasearch/monitoring/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/monitoring/models/incident.py b/algoliasearch/monitoring/models/incident.py index e8ab1f79a..88b565b33 100644 --- a/algoliasearch/monitoring/models/incident.py +++ b/algoliasearch/monitoring/models/incident.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.monitoring.models.status import Status +_ALIASES = { + "title": "title", + "status": "status", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Incident(BaseModel): """ Incident details. """ - title: Optional[str] = Field(default=None, alias="title") + title: Optional[str] = None """ Description of the incident. 
""" - status: Optional[Status] = Field(default=None, alias="status") + status: Optional[Status] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/incident_entry.py b/algoliasearch/monitoring/models/incident_entry.py index cf4b608f0..30aba0e52 100644 --- a/algoliasearch/monitoring/models/incident_entry.py +++ b/algoliasearch/monitoring/models/incident_entry.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.monitoring.models.incident import Incident +_ALIASES = { + "t": "t", + "v": "v", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class IncidentEntry(BaseModel): """ IncidentEntry """ - t: Optional[int] = Field(default=None, alias="t") + t: Optional[int] = None """ Timestamp, measured in milliseconds since the Unix epoch. 
""" - v: Optional[Incident] = Field(default=None, alias="v") + v: Optional[Incident] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/incidents_response.py b/algoliasearch/monitoring/models/incidents_response.py index 026891439..3efd889cf 100644 --- a/algoliasearch/monitoring/models/incidents_response.py +++ b/algoliasearch/monitoring/models/incidents_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,28 @@ from algoliasearch.monitoring.models.incident_entry import IncidentEntry +_ALIASES = { + "incidents": "incidents", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class IncidentsResponse(BaseModel): """ IncidentsResponse """ - incidents: Optional[Dict[str, List[IncidentEntry]]] = Field( - default=None, alias="incidents" - ) + incidents: Optional[Dict[str, List[IncidentEntry]]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/indexing_metric.py b/algoliasearch/monitoring/models/indexing_metric.py index 3f0118f29..d3c25d9d7 100644 --- a/algoliasearch/monitoring/models/indexing_metric.py +++ b/algoliasearch/monitoring/models/indexing_metric.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,28 @@ from algoliasearch.monitoring.models.time_entry 
import TimeEntry +_ALIASES = { + "indexing": "indexing", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class IndexingMetric(BaseModel): """ IndexingMetric """ - indexing: Optional[Dict[str, List[TimeEntry]]] = Field( - default=None, alias="indexing" - ) + indexing: Optional[Dict[str, List[TimeEntry]]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/indexing_time_response.py b/algoliasearch/monitoring/models/indexing_time_response.py index 0a08746fa..73e3ac94c 100644 --- a/algoliasearch/monitoring/models/indexing_time_response.py +++ b/algoliasearch/monitoring/models/indexing_time_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.indexing_metric import IndexingMetric +_ALIASES = { + "metrics": "metrics", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class IndexingTimeResponse(BaseModel): """ IndexingTimeResponse """ - metrics: Optional[IndexingMetric] = Field(default=None, alias="metrics") + metrics: Optional[IndexingMetric] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/infrastructure_response.py b/algoliasearch/monitoring/models/infrastructure_response.py index 1c3bb417d..5c3a9b46f 100644 --- a/algoliasearch/monitoring/models/infrastructure_response.py +++ b/algoliasearch/monitoring/models/infrastructure_response.py @@ -10,7 +10,7 @@ from sys import version_info from 
typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.metrics import Metrics +_ALIASES = { + "metrics": "metrics", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class InfrastructureResponse(BaseModel): """ InfrastructureResponse """ - metrics: Optional[Metrics] = Field(default=None, alias="metrics") + metrics: Optional[Metrics] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/inventory_response.py b/algoliasearch/monitoring/models/inventory_response.py index 5d4b0b214..cde160ed9 100644 --- a/algoliasearch/monitoring/models/inventory_response.py +++ b/algoliasearch/monitoring/models/inventory_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.server import Server +_ALIASES = { + "inventory": "inventory", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class InventoryResponse(BaseModel): """ InventoryResponse """ - inventory: Optional[List[Server]] = Field(default=None, alias="inventory") + inventory: Optional[List[Server]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/latency_metric.py b/algoliasearch/monitoring/models/latency_metric.py index 5ebc98c9e..cd3cf688c 100644 --- 
a/algoliasearch/monitoring/models/latency_metric.py +++ b/algoliasearch/monitoring/models/latency_metric.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.time_entry import TimeEntry +_ALIASES = { + "latency": "latency", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class LatencyMetric(BaseModel): """ LatencyMetric """ - latency: Optional[Dict[str, List[TimeEntry]]] = Field(default=None, alias="latency") + latency: Optional[Dict[str, List[TimeEntry]]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/latency_response.py b/algoliasearch/monitoring/models/latency_response.py index 53d739922..e661bf5fc 100644 --- a/algoliasearch/monitoring/models/latency_response.py +++ b/algoliasearch/monitoring/models/latency_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.latency_metric import LatencyMetric +_ALIASES = { + "metrics": "metrics", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class LatencyResponse(BaseModel): """ LatencyResponse """ - metrics: Optional[LatencyMetric] = Field(default=None, alias="metrics") + metrics: Optional[LatencyMetric] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def 
to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/metrics.py b/algoliasearch/monitoring/models/metrics.py index 8608de724..3fb14f0b3 100644 --- a/algoliasearch/monitoring/models/metrics.py +++ b/algoliasearch/monitoring/models/metrics.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,31 +20,33 @@ from algoliasearch.monitoring.models.probes_metric import ProbesMetric +_ALIASES = { + "cpu_usage": "cpu_usage", + "ram_indexing_usage": "ram_indexing_usage", + "ram_search_usage": "ram_search_usage", + "ssd_usage": "ssd_usage", + "avg_build_time": "avg_build_time", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Metrics(BaseModel): """ Metrics """ - cpu_usage: Optional[Dict[str, List[ProbesMetric]]] = Field( - default=None, alias="cpu_usage" - ) + cpu_usage: Optional[Dict[str, List[ProbesMetric]]] = None """ CPU idleness in %. """ - ram_indexing_usage: Optional[Dict[str, List[ProbesMetric]]] = Field( - default=None, alias="ram_indexing_usage" - ) + ram_indexing_usage: Optional[Dict[str, List[ProbesMetric]]] = None """ RAM used for indexing in MB. """ - ram_search_usage: Optional[Dict[str, List[ProbesMetric]]] = Field( - default=None, alias="ram_search_usage" - ) + ram_search_usage: Optional[Dict[str, List[ProbesMetric]]] = None """ RAM used for search in MB. """ - ssd_usage: Optional[Dict[str, List[ProbesMetric]]] = Field( - default=None, alias="ssd_usage" - ) + ssd_usage: Optional[Dict[str, List[ProbesMetric]]] = None """ Solid-state disk (SSD) usage expressed as % of RAM. 0% means no SSD usage. A value of 50% indicates 32 GB SSD usage for a machine with 64 RAM. 
""" - avg_build_time: Optional[Dict[str, List[ProbesMetric]]] = Field( - default=None, alias="avg_build_time" - ) + avg_build_time: Optional[Dict[str, List[ProbesMetric]]] = None """ Average build time of the indices in seconds. """ model_config = ConfigDict( @@ -52,6 +54,7 @@ class Metrics(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/probes_metric.py b/algoliasearch/monitoring/models/probes_metric.py index c7af87106..a8ac9c3e2 100644 --- a/algoliasearch/monitoring/models/probes_metric.py +++ b/algoliasearch/monitoring/models/probes_metric.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "t": "t", + "v": "v", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ProbesMetric(BaseModel): """ ProbesMetric """ - t: Optional[int] = Field(default=None, alias="t") + t: Optional[int] = None """ Timestamp, measured in milliseconds since the Unix epoch. """ - v: Optional[int] = Field(default=None, alias="v") + v: Optional[int] = None """ Value of the metric. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class ProbesMetric(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/server.py b/algoliasearch/monitoring/models/server.py index ec0e3e9e4..14dc496af 100644 --- a/algoliasearch/monitoring/models/server.py +++ b/algoliasearch/monitoring/models/server.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,29 +22,44 @@ from algoliasearch.monitoring.models.server_status import ServerStatus from algoliasearch.monitoring.models.type import Type +_ALIASES = { + "name": "name", + "region": "region", + "is_slave": "is_slave", + "is_replica": "is_replica", + "cluster": "cluster", + "status": "status", + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Server(BaseModel): """ Server """ - name: Optional[str] = Field(default=None, alias="name") + name: Optional[str] = None """ Server name. """ - region: Optional[Region] = Field(default=None, alias="region") - is_slave: Optional[bool] = Field(default=None, alias="is_slave") + region: Optional[Region] = None + is_slave: Optional[bool] = None """ Included to support legacy applications. Use `is_replica` instead. """ - is_replica: Optional[bool] = Field(default=None, alias="is_replica") + is_replica: Optional[bool] = None """ Whether this server is a replica of another server. """ - cluster: Optional[str] = Field(default=None, alias="cluster") + cluster: Optional[str] = None """ Name of the cluster to which this server belongs. 
""" - status: Optional[ServerStatus] = Field(default=None, alias="status") - type: Optional[Type] = Field(default=None, alias="type") + status: Optional[ServerStatus] = None + type: Optional[Type] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/monitoring/models/status_response.py b/algoliasearch/monitoring/models/status_response.py index 29fa453c1..d5b9fde72 100644 --- a/algoliasearch/monitoring/models/status_response.py +++ b/algoliasearch/monitoring/models/status_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.monitoring.models.status import Status +_ALIASES = { + "status": "status", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class StatusResponse(BaseModel): """ StatusResponse """ - status: Optional[Dict[str, Status]] = Field(default=None, alias="status") + status: Optional[Dict[str, Status]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: @@ -60,6 +69,8 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: if not isinstance(obj, dict): return cls.model_validate(obj) - obj["status"] = dict((_k, _v) for _k, _v in obj.get("status").items()) + status = obj.get("status") + if status is not None: + obj["status"] = dict((_k, _v) for _k, _v in status.items()) return cls.model_validate(obj) diff --git a/algoliasearch/monitoring/models/time_entry.py b/algoliasearch/monitoring/models/time_entry.py index f625f53f0..ca440143d 100644 --- 
a/algoliasearch/monitoring/models/time_entry.py +++ b/algoliasearch/monitoring/models/time_entry.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "t": "t", + "v": "v", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TimeEntry(BaseModel): """ TimeEntry """ - t: Optional[int] = Field(default=None, alias="t") + t: Optional[int] = None """ Timestamp, measured in milliseconds since the Unix epoch. """ - v: Optional[int] = Field(default=None, alias="v") + v: Optional[int] = None """ Time in ms. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class TimeEntry(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/client.py b/algoliasearch/personalization/client.py index 1bcdc401f..809f9589d 100644 --- a/algoliasearch/personalization/client.py +++ b/algoliasearch/personalization/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from urllib.parse import quote from pydantic import Field, StrictStr @@ -20,8 +20,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -59,19 +60,21 @@ 
class PersonalizationClient: """ _transporter: Transporter - _config: PersonalizationConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[Transporter] = None, config: Optional[PersonalizationConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = PersonalizationConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = PersonalizationConfig(app_id, api_key, region) @@ -123,7 +126,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def custom_delete_with_http_info( self, @@ -156,11 +159,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -231,11 +234,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -310,11 +313,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -325,7 +328,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -402,11 +405,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -417,7 +420,7 @@ async def 
custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -626,7 +629,9 @@ async def get_user_token_profile( async def set_personalization_strategy_with_http_info( self, - personalization_strategy_params: PersonalizationStrategyParams, + personalization_strategy_params: Union[ + PersonalizationStrategyParams, dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -654,7 +659,7 @@ async def set_personalization_strategy_with_http_info( verb=Verb.POST, path="/1/strategies/personalization", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -662,7 +667,9 @@ async def set_personalization_strategy_with_http_info( async def set_personalization_strategy( self, - personalization_strategy_params: PersonalizationStrategyParams, + personalization_strategy_params: Union[ + PersonalizationStrategyParams, dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SetPersonalizationStrategyResponse: """ @@ -701,19 +708,21 @@ class PersonalizationClientSync: """ _transporter: TransporterSync - _config: PersonalizationConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[TransporterSync] = None, config: Optional[PersonalizationConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = PersonalizationConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = 
PersonalizationConfig(app_id, api_key, region) @@ -766,7 +775,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def custom_delete_with_http_info( self, @@ -799,11 +808,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -872,11 +881,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -951,11 +960,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -966,7 +975,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1041,11 +1050,11 @@ def 
custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1056,7 +1065,7 @@ def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1259,7 +1268,9 @@ def get_user_token_profile( def set_personalization_strategy_with_http_info( self, - personalization_strategy_params: PersonalizationStrategyParams, + personalization_strategy_params: Union[ + PersonalizationStrategyParams, dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1287,7 +1298,7 @@ def set_personalization_strategy_with_http_info( verb=Verb.POST, path="/1/strategies/personalization", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1295,7 +1306,9 @@ def set_personalization_strategy_with_http_info( def set_personalization_strategy( self, - personalization_strategy_params: PersonalizationStrategyParams, + personalization_strategy_params: Union[ + PersonalizationStrategyParams, dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SetPersonalizationStrategyResponse: """ diff --git a/algoliasearch/personalization/config.py b/algoliasearch/personalization/config.py index b9313fe06..fa3cde3c5 100644 --- a/algoliasearch/personalization/config.py +++ b/algoliasearch/personalization/config.py @@ -1,4 +1,5 
@@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import Host, HostsCollection @@ -6,11 +7,16 @@ class PersonalizationConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: str = None) -> None: + def __init__( + self, app_id: Optional[str], api_key: Optional[str], region: str = "" + ) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Personalization") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." + self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -37,5 +43,11 @@ def __init__(self, app_id: str, api_key: str, region: str = None) -> None: ) self.hosts = HostsCollection( - [Host("personalization.{region}.algolia.com".replace("{region}", region))] + [ + Host( + "personalization.{region}.algolia.com".replace( + "{region}", region or "" + ) + ) + ] ) diff --git a/algoliasearch/personalization/models/delete_user_profile_response.py b/algoliasearch/personalization/models/delete_user_profile_response.py index 5d21f8edc..b5cf3ae89 100644 --- a/algoliasearch/personalization/models/delete_user_profile_response.py +++ b/algoliasearch/personalization/models/delete_user_profile_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "user_token": "userToken", + "deleted_until": "deletedUntil", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeleteUserProfileResponse(BaseModel): """ DeleteUserProfileResponse """ - user_token: str = Field(alias="userToken") + user_token: str """ Unique pseudonymous or anonymous user identifier. 
This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - deleted_until: str = Field(alias="deletedUntil") + deleted_until: str """ Date and time when the user profile can be safely considered to be deleted. Any events received after the `deletedUntil` date start a new user profile. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class DeleteUserProfileResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/models/error_base.py b/algoliasearch/personalization/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/personalization/models/error_base.py +++ b/algoliasearch/personalization/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. 
""" - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/personalization/models/event_scoring.py b/algoliasearch/personalization/models/event_scoring.py index ab675db4c..886efdbdb 100644 --- a/algoliasearch/personalization/models/event_scoring.py +++ b/algoliasearch/personalization/models/event_scoring.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,23 +20,34 @@ from algoliasearch.personalization.models.event_type import EventType +_ALIASES = { + "score": "score", + "event_name": "eventName", + "event_type": "eventType", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class EventScoring(BaseModel): """ EventScoring """ - score: int = Field(alias="score") + score: int """ Event score. """ - event_name: str = Field(alias="eventName") + event_name: str """ Event name. 
""" - event_type: EventType = Field(alias="eventType") + event_type: EventType model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/models/facet_scoring.py b/algoliasearch/personalization/models/facet_scoring.py index 88aa045a2..e7ea7ac89 100644 --- a/algoliasearch/personalization/models/facet_scoring.py +++ b/algoliasearch/personalization/models/facet_scoring.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "score": "score", + "facet_name": "facetName", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class FacetScoring(BaseModel): """ FacetScoring """ - score: int = Field(alias="score") + score: int """ Event score. """ - facet_name: str = Field(alias="facetName") + facet_name: str """ Facet attribute name. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class FacetScoring(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/models/get_user_token_response.py b/algoliasearch/personalization/models/get_user_token_response.py index 973c54f87..6f2512b16 100644 --- a/algoliasearch/personalization/models/get_user_token_response.py +++ b/algoliasearch/personalization/models/get_user_token_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "user_token": "userToken", + "last_event_at": "lastEventAt", + "scores": "scores", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class GetUserTokenResponse(BaseModel): """ GetUserTokenResponse """ - user_token: str = Field(alias="userToken") + user_token: str """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - last_event_at: str = Field(alias="lastEventAt") + last_event_at: str """ Date and time of the last event from this user, in RFC 3339 format. """ - scores: object = Field(alias="scores") + scores: object """ Scores for different facet values. Scores represent the user affinity for a user profile towards specific facet values, given the personalization strategy and past events. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class GetUserTokenResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/models/personalization_strategy_params.py b/algoliasearch/personalization/models/personalization_strategy_params.py index f2586796b..b2daf5a66 100644 --- a/algoliasearch/personalization/models/personalization_strategy_params.py +++ b/algoliasearch/personalization/models/personalization_strategy_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,17 +21,27 @@ from algoliasearch.personalization.models.event_scoring import EventScoring from algoliasearch.personalization.models.facet_scoring import FacetScoring +_ALIASES = { + "event_scoring": "eventScoring", + "facet_scoring": "facetScoring", + "personalization_impact": "personalizationImpact", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class PersonalizationStrategyParams(BaseModel): """ PersonalizationStrategyParams """ - event_scoring: List[EventScoring] = Field(alias="eventScoring") + event_scoring: List[EventScoring] """ Scores associated with each event. The higher the scores, the higher the impact of those events on the personalization of search results. """ - facet_scoring: List[FacetScoring] = Field(alias="facetScoring") + facet_scoring: List[FacetScoring] """ Scores associated with each facet. The higher the scores, the higher the impact of those events on the personalization of search results. """ - personalization_impact: int = Field(alias="personalizationImpact") + personalization_impact: int """ Impact of personalization on the search results. 
If set to 0, personalization has no impact on the search results. """ model_config = ConfigDict( @@ -39,6 +49,7 @@ class PersonalizationStrategyParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/personalization/models/set_personalization_strategy_response.py b/algoliasearch/personalization/models/set_personalization_strategy_response.py index 529cbb371..8f2433c43 100644 --- a/algoliasearch/personalization/models/set_personalization_strategy_response.py +++ b/algoliasearch/personalization/models/set_personalization_strategy_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SetPersonalizationStrategyResponse(BaseModel): """ SetPersonalizationStrategyResponse """ - message: str = Field(alias="message") + message: str """ A message confirming the strategy update. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class SetPersonalizationStrategyResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/client.py b/algoliasearch/query_suggestions/client.py index 3b065b7e6..e01bc8f36 100644 --- a/algoliasearch/query_suggestions/client.py +++ b/algoliasearch/query_suggestions/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Union from urllib.parse import quote from pydantic import Field, StrictStr @@ -20,8 +20,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -57,19 +58,21 @@ class QuerySuggestionsClient: """ _transporter: Transporter - _config: QuerySuggestionsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[Transporter] = None, config: Optional[QuerySuggestionsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = QuerySuggestionsConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = QuerySuggestionsConfig(app_id, api_key, region) @@ -121,11 +124,11 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new 
API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def create_config_with_http_info( self, - configuration_with_index: ConfigurationWithIndex, + configuration_with_index: Union[ConfigurationWithIndex, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -153,7 +156,7 @@ async def create_config_with_http_info( verb=Verb.POST, path="/1/configs", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -161,7 +164,7 @@ async def create_config_with_http_info( async def create_config( self, - configuration_with_index: ConfigurationWithIndex, + configuration_with_index: Union[ConfigurationWithIndex, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BaseResponse: """ @@ -211,11 +214,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -286,11 +289,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -365,11 +368,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -380,7 +383,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -457,11 +460,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -472,7 +475,7 @@ async def 
custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -780,7 +783,7 @@ async def update_config_with_http_info( index_name: Annotated[ StrictStr, Field(description="Query Suggestions index name.") ], - configuration: Configuration, + configuration: Union[Configuration, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -817,7 +820,7 @@ async def update_config_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -828,7 +831,7 @@ async def update_config( index_name: Annotated[ StrictStr, Field(description="Query Suggestions index name.") ], - configuration: Configuration, + configuration: Union[Configuration, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BaseResponse: """ @@ -869,19 +872,21 @@ class QuerySuggestionsClientSync: """ _transporter: TransporterSync - _config: QuerySuggestionsConfig + _config: BaseConfig _request_options: RequestOptions def __init__( self, app_id: Optional[str] = None, api_key: Optional[str] = None, - region: str = None, + region: str = "", transporter: Optional[TransporterSync] = None, config: Optional[QuerySuggestionsConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = QuerySuggestionsConfig( + transporter.config.app_id, transporter.config.api_key, region + ) if config is None: config = QuerySuggestionsConfig(app_id, api_key, region) @@ -934,11 +939,11 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a 
new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def create_config_with_http_info( self, - configuration_with_index: ConfigurationWithIndex, + configuration_with_index: Union[ConfigurationWithIndex, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -966,7 +971,7 @@ def create_config_with_http_info( verb=Verb.POST, path="/1/configs", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -974,7 +979,7 @@ def create_config_with_http_info( def create_config( self, - configuration_with_index: ConfigurationWithIndex, + configuration_with_index: Union[ConfigurationWithIndex, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BaseResponse: """ @@ -1024,11 +1029,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -1097,11 +1102,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -1176,11 +1181,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1191,7 +1196,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1266,11 +1271,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1281,7 +1286,7 @@ def custom_put_with_http_info( 
path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1587,7 +1592,7 @@ def update_config_with_http_info( index_name: Annotated[ StrictStr, Field(description="Query Suggestions index name.") ], - configuration: Configuration, + configuration: Union[Configuration, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1624,7 +1629,7 @@ def update_config_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1635,7 +1640,7 @@ def update_config( index_name: Annotated[ StrictStr, Field(description="Query Suggestions index name.") ], - configuration: Configuration, + configuration: Union[Configuration, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BaseResponse: """ diff --git a/algoliasearch/query_suggestions/config.py b/algoliasearch/query_suggestions/config.py index 7cec35206..e318f3c42 100644 --- a/algoliasearch/query_suggestions/config.py +++ b/algoliasearch/query_suggestions/config.py @@ -1,4 +1,5 @@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import Host, HostsCollection @@ -6,11 +7,16 @@ class QuerySuggestionsConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str, region: str = None) -> None: + def __init__( + self, app_id: Optional[str], api_key: Optional[str], region: str = "" + ) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("QuerySuggestions") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." 
+ self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, @@ -37,5 +43,11 @@ def __init__(self, app_id: str, api_key: str, region: str = None) -> None: ) self.hosts = HostsCollection( - [Host("query-suggestions.{region}.algolia.com".replace("{region}", region))] + [ + Host( + "query-suggestions.{region}.algolia.com".replace( + "{region}", region or "" + ) + ) + ] ) diff --git a/algoliasearch/query_suggestions/models/base_response.py b/algoliasearch/query_suggestions/models/base_response.py index 3c6847db1..e5e03802c 100644 --- a/algoliasearch/query_suggestions/models/base_response.py +++ b/algoliasearch/query_suggestions/models/base_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "status": "status", + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class BaseResponse(BaseModel): """ BaseResponse """ - status: Optional[int] = Field(default=None, alias="status") + status: Optional[int] = None """ HTTP status code. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ Details about the response, such as error messages. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class BaseResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/config_status.py b/algoliasearch/query_suggestions/models/config_status.py index db3818e15..7ec21eb02 100644 --- a/algoliasearch/query_suggestions/models/config_status.py +++ b/algoliasearch/query_suggestions/models/config_status.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,24 +18,33 @@ from typing_extensions import Self +_ALIASES = { + "index_name": "indexName", + "is_running": "isRunning", + "last_built_at": "lastBuiltAt", + "last_successful_built_at": "lastSuccessfulBuiltAt", + "last_successful_build_duration": "lastSuccessfulBuildDuration", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ConfigStatus(BaseModel): """ ConfigStatus """ - index_name: Optional[str] = Field(default=None, alias="indexName") + index_name: Optional[str] = None """ Name of the Query Suggestions index (case-sensitive). """ - is_running: Optional[bool] = Field(default=None, alias="isRunning") + is_running: Optional[bool] = None """ Whether the creation or update of the Query Suggestions index is in progress. """ - last_built_at: Optional[str] = Field(default=None, alias="lastBuiltAt") + last_built_at: Optional[str] = None """ Date and time when the Query Suggestions index was last built, in RFC 3339 format. """ - last_successful_built_at: Optional[str] = Field( - default=None, alias="lastSuccessfulBuiltAt" - ) + last_successful_built_at: Optional[str] = None """ Date and time when the Query Suggestions index was last updated successfully. 
""" - last_successful_build_duration: Optional[str] = Field( - default=None, alias="lastSuccessfulBuildDuration" - ) + last_successful_build_duration: Optional[str] = None """ Duration of the last successful build in seconds. """ model_config = ConfigDict( @@ -43,6 +52,7 @@ class ConfigStatus(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/configuration.py b/algoliasearch/query_suggestions/models/configuration.py index 072f83301..8a207e31f 100644 --- a/algoliasearch/query_suggestions/models/configuration.py +++ b/algoliasearch/query_suggestions/models/configuration.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,23 +21,31 @@ from algoliasearch.query_suggestions.models.languages import Languages from algoliasearch.query_suggestions.models.source_index import SourceIndex +_ALIASES = { + "source_indices": "sourceIndices", + "languages": "languages", + "exclude": "exclude", + "enable_personalization": "enablePersonalization", + "allow_special_characters": "allowSpecialCharacters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Configuration(BaseModel): """ Query Suggestions configuration. """ - source_indices: List[SourceIndex] = Field(alias="sourceIndices") + source_indices: List[SourceIndex] """ Algolia indices from which to get the popular searches for query suggestions. 
""" - languages: Optional[Languages] = Field(default=None, alias="languages") - exclude: Optional[List[str]] = Field(default=None, alias="exclude") - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + languages: Optional[Languages] = None + exclude: Optional[List[str]] = None + enable_personalization: Optional[bool] = None """ Whether to turn on personalized query suggestions. """ - allow_special_characters: Optional[bool] = Field( - default=None, alias="allowSpecialCharacters" - ) + allow_special_characters: Optional[bool] = None """ Whether to include suggestions with special characters. """ model_config = ConfigDict( @@ -45,6 +53,7 @@ class Configuration(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/configuration_response.py b/algoliasearch/query_suggestions/models/configuration_response.py index 20e9b3e90..2255aa734 100644 --- a/algoliasearch/query_suggestions/models/configuration_response.py +++ b/algoliasearch/query_suggestions/models/configuration_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,23 +21,37 @@ from algoliasearch.query_suggestions.models.languages import Languages from algoliasearch.query_suggestions.models.source_index import SourceIndex +_ALIASES = { + "app_id": "appID", + "index_name": "indexName", + "source_indices": "sourceIndices", + "languages": "languages", + "exclude": "exclude", + "enable_personalization": "enablePersonalization", + "allow_special_characters": "allowSpecialCharacters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConfigurationResponse(BaseModel): """ API response for 
retrieving Query Suggestions configurations. """ - app_id: str = Field(alias="appID") + app_id: str """ Algolia application ID to which this Query Suggestions configuration belongs. """ - index_name: str = Field(alias="indexName") + index_name: str """ Name of the Query Suggestions index (case-sensitive). """ - source_indices: List[SourceIndex] = Field(alias="sourceIndices") + source_indices: List[SourceIndex] """ Algolia indices from which to get the popular searches for query suggestions. """ - languages: Languages = Field(alias="languages") - exclude: List[str] = Field(alias="exclude") - enable_personalization: bool = Field(alias="enablePersonalization") + languages: Languages + exclude: List[str] + enable_personalization: bool """ Whether to turn on personalized query suggestions. """ - allow_special_characters: bool = Field(alias="allowSpecialCharacters") + allow_special_characters: bool """ Whether to include suggestions with special characters. """ model_config = ConfigDict( @@ -45,6 +59,7 @@ class ConfigurationResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/configuration_with_index.py b/algoliasearch/query_suggestions/models/configuration_with_index.py index 68d0bfa22..35bce4053 100644 --- a/algoliasearch/query_suggestions/models/configuration_with_index.py +++ b/algoliasearch/query_suggestions/models/configuration_with_index.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,25 +21,34 @@ from algoliasearch.query_suggestions.models.languages import Languages from algoliasearch.query_suggestions.models.source_index import SourceIndex +_ALIASES = { + "source_indices": "sourceIndices", + "languages": 
"languages", + "exclude": "exclude", + "enable_personalization": "enablePersonalization", + "allow_special_characters": "allowSpecialCharacters", + "index_name": "indexName", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConfigurationWithIndex(BaseModel): """ Query Suggestions configuration. """ - source_indices: List[SourceIndex] = Field(alias="sourceIndices") + source_indices: List[SourceIndex] """ Algolia indices from which to get the popular searches for query suggestions. """ - languages: Optional[Languages] = Field(default=None, alias="languages") - exclude: Optional[List[str]] = Field(default=None, alias="exclude") - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + languages: Optional[Languages] = None + exclude: Optional[List[str]] = None + enable_personalization: Optional[bool] = None """ Whether to turn on personalized query suggestions. """ - allow_special_characters: Optional[bool] = Field( - default=None, alias="allowSpecialCharacters" - ) + allow_special_characters: Optional[bool] = None """ Whether to include suggestions with special characters. """ - index_name: str = Field(alias="indexName") + index_name: str """ Name of the Query Suggestions index (case-sensitive). 
""" model_config = ConfigDict( @@ -47,6 +56,7 @@ class ConfigurationWithIndex(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/error_base.py b/algoliasearch/query_suggestions/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/query_suggestions/models/error_base.py +++ b/algoliasearch/query_suggestions/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/query_suggestions/models/facet.py b/algoliasearch/query_suggestions/models/facet.py index dfe651e4a..cedb4ae48 100644 --- a/algoliasearch/query_suggestions/models/facet.py +++ b/algoliasearch/query_suggestions/models/facet.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "attribute": "attribute", + "amount": "amount", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Facet(BaseModel): """ Facet to use as category. 
""" - attribute: Optional[str] = Field(default=None, alias="attribute") + attribute: Optional[str] = None """ Facet name. """ - amount: Optional[int] = Field(default=None, alias="amount") + amount: Optional[int] = None """ Number of suggestions. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class Facet(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/languages.py b/algoliasearch/query_suggestions/models/languages.py index 2ccb3a9fa..52c0aface 100644 --- a/algoliasearch/query_suggestions/models/languages.py +++ b/algoliasearch/query_suggestions/models/languages.py @@ -27,7 +27,7 @@ class Languages(BaseModel): """ Languages for which to deduplicate singular and plural forms. """ oneof_schema_2_validator: Optional[bool] = Field(default=None) """ If true, deduplication is enabled for all languages. """ - actual_instance: Optional[Union[List[str], bool]] = None + actual_instance: Union[List[str], bool, None] = None one_of_schemas: Set[str] = {"List[str]", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[str], bool]]: + def unwrap_actual_instance(self) -> Union[List[str], bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], bool]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/query_suggestions/models/log_file.py b/algoliasearch/query_suggestions/models/log_file.py index ce3fbcc4d..9f5e43464 100644 --- a/algoliasearch/query_suggestions/models/log_file.py +++ b/algoliasearch/query_suggestions/models/log_file.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.query_suggestions.models.log_level import LogLevel +_ALIASES = { + "timestamp": "timestamp", + "level": "level", + "message": "message", + "context_level": "contextLevel", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class LogFile(BaseModel): """ LogFile """ - timestamp: Optional[str] = Field(default=None, alias="timestamp") + timestamp: Optional[str] = None """ Date and time of the log entry, in RFC 3339 format. """ - level: Optional[LogLevel] = Field(default=None, alias="level") - message: Optional[str] = Field(default=None, alias="message") + level: Optional[LogLevel] = None + message: Optional[str] = None """ Details about this log entry. 
""" - context_level: Optional[int] = Field(default=None, alias="contextLevel") + context_level: Optional[int] = None """ Level indicating the position of a suggestion in a hierarchy of records. For example, a `contextLevel` of 1 indicates that this suggestion belongs to a previous suggestion with `contextLevel` 0. """ model_config = ConfigDict( @@ -39,6 +50,7 @@ class LogFile(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/query_suggestions/models/source_index.py b/algoliasearch/query_suggestions/models/source_index.py index 22711c6ba..a7c52c76f 100644 --- a/algoliasearch/query_suggestions/models/source_index.py +++ b/algoliasearch/query_suggestions/models/source_index.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,30 +20,46 @@ from algoliasearch.query_suggestions.models.facet import Facet +_ALIASES = { + "index_name": "indexName", + "replicas": "replicas", + "analytics_tags": "analyticsTags", + "facets": "facets", + "min_hits": "minHits", + "min_letters": "minLetters", + "generate": "generate", + "external": "external", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SourceIndex(BaseModel): """ Configuration of an Algolia index for Query Suggestions. """ - index_name: str = Field(alias="indexName") + index_name: str """ Name of the Algolia index (case-sensitive) to use as source for query suggestions. """ - replicas: Optional[bool] = Field(default=None, alias="replicas") + replicas: Optional[bool] = None """ If true, Query Suggestions uses all replica indices to find popular searches. If false, only the primary index is used. 
""" - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") - facets: Optional[List[Facet]] = Field(default=None, alias="facets") - min_hits: Optional[int] = Field(default=None, alias="minHits") + analytics_tags: Optional[List[str]] = None + facets: Optional[List[Facet]] = None + min_hits: Optional[int] = None """ Minimum number of hits required to be included as a suggestion. A search query must at least generate `minHits` search results to be included in the Query Suggestions index. """ - min_letters: Optional[int] = Field(default=None, alias="minLetters") + min_letters: Optional[int] = None """ Minimum letters required to be included as a suggestion. A search query must be at least `minLetters` long to be included in the Query Suggestions index. """ - generate: Optional[List[List[str]]] = Field(default=None, alias="generate") - external: Optional[List[str]] = Field(default=None, alias="external") + generate: Optional[List[List[str]]] = None + external: Optional[List[str]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/client.py b/algoliasearch/recommend/client.py index 5d8139bc5..77768ae5d 100644 --- a/algoliasearch/recommend/client.py +++ b/algoliasearch/recommend/client.py @@ -8,7 +8,7 @@ from json import dumps from sys import version_info -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Union from urllib.parse import quote from pydantic import Field, StrictInt, StrictStr @@ -20,8 +20,9 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import bodySerializer +from algoliasearch.http.serializer import 
body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -68,7 +69,7 @@ class RecommendClient: """ _transporter: Transporter - _config: RecommendConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -79,7 +80,9 @@ def __init__( config: Optional[RecommendConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = RecommendConfig( + transporter.config.app_id, transporter.config.api_key + ) if config is None: config = RecommendConfig(app_id, api_key) @@ -130,7 +133,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def batch_recommend_rules_with_http_info( self, @@ -138,13 +141,18 @@ async def batch_recommend_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - recommend_rule: Optional[List[RecommendRule]] = None, + recommend_rule: Union[ + Optional[List[RecommendRule]], list[dict[str, Any]] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -183,7 +191,7 @@ async def batch_recommend_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{model}", quote(str(model), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -195,13 +203,18 @@ async def batch_recommend_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], - recommend_rule: Optional[List[RecommendRule]] = None, + recommend_rule: Union[ + Optional[List[RecommendRule]], list[dict[str, Any]] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> RecommendUpdatedAtResponse: """ @@ -255,11 +268,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -330,11 +343,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -409,11 +422,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -424,7 +437,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -501,11 +514,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -516,7 +529,7 @@ async def 
custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -564,11 +577,14 @@ async def delete_recommend_rule_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -623,11 +639,14 @@ async def delete_recommend_rule( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -658,11 +677,14 @@ async def get_recommend_rule_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -717,11 +739,14 @@ async def get_recommend_rule( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -752,11 +777,14 @@ async def get_recommend_status_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], task_id: Annotated[StrictInt, Field(description="Unique task identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -811,11 +839,14 @@ async def get_recommend_status( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], task_id: Annotated[StrictInt, Field(description="Unique task identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -842,7 +873,7 @@ async def get_recommend_status( async def get_recommendations_with_http_info( self, - get_recommendations_params: GetRecommendationsParams, + get_recommendations_params: Union[GetRecommendationsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -870,7 +901,7 @@ async def get_recommendations_with_http_info( verb=Verb.POST, path="/1/indexes/*/recommendations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -878,7 +909,7 @@ async def get_recommendations_with_http_info( async def get_recommendations( self, - get_recommendations_params: GetRecommendationsParams, + get_recommendations_params: Union[GetRecommendationsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetRecommendationsResponse: """ @@ -903,13 +934,18 @@ async def search_recommend_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - search_recommend_rules_params: Optional[SearchRecommendRulesParams] = None, + search_recommend_rules_params: Union[ + Optional[SearchRecommendRulesParams], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -948,7 +984,7 @@ async def search_recommend_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{model}", quote(str(model), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -960,13 +996,18 @@ async def search_recommend_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - search_recommend_rules_params: Optional[SearchRecommendRulesParams] = None, + search_recommend_rules_params: Union[ + Optional[SearchRecommendRulesParams], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRecommendRulesResponse: """ @@ -1009,7 +1050,7 @@ class RecommendClientSync: """ _transporter: TransporterSync - _config: RecommendConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -1020,7 +1061,9 @@ def __init__( config: Optional[RecommendConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = RecommendConfig( + transporter.config.app_id, transporter.config.api_key + ) if config is None: config = RecommendConfig(app_id, api_key) @@ -1070,7 +1113,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def batch_recommend_rules_with_http_info( self, @@ -1078,13 +1121,18 @@ def batch_recommend_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - recommend_rule: Optional[List[RecommendRule]] = None, + recommend_rule: Union[ + Optional[List[RecommendRule]], list[dict[str, Any]] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1123,7 +1171,7 @@ def batch_recommend_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{model}", quote(str(model), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1135,13 +1183,18 @@ def batch_recommend_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], - recommend_rule: Optional[List[RecommendRule]] = None, + recommend_rule: Union[ + Optional[List[RecommendRule]], list[dict[str, Any]] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> RecommendUpdatedAtResponse: """ @@ -1195,11 +1248,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -1268,11 +1321,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -1347,11 +1400,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1362,7 +1415,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1437,11 +1490,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1452,7 +1505,7 @@ def custom_put_with_http_info( 
path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1498,11 +1551,14 @@ def delete_recommend_rule_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1557,11 +1613,14 @@ def delete_recommend_rule( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1592,11 +1651,14 @@ def get_recommend_rule_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1651,11 +1713,14 @@ def get_recommend_rule( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], object_id: Annotated[StrictStr, Field(description="Unique record identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1686,11 +1751,14 @@ def get_recommend_status_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], task_id: Annotated[StrictInt, Field(description="Unique task identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1745,11 +1813,14 @@ def get_recommend_status( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " + ), + ], + str, ], task_id: Annotated[StrictInt, Field(description="Unique task identifier.")], request_options: Optional[Union[dict, RequestOptions]] = None, @@ -1776,7 +1847,7 @@ def get_recommend_status( def get_recommendations_with_http_info( self, - get_recommendations_params: GetRecommendationsParams, + get_recommendations_params: Union[GetRecommendationsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1804,7 +1875,7 @@ def get_recommendations_with_http_info( verb=Verb.POST, path="/1/indexes/*/recommendations", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -1812,7 +1883,7 @@ def get_recommendations_with_http_info( def get_recommendations( self, - get_recommendations_params: GetRecommendationsParams, + get_recommendations_params: Union[GetRecommendationsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetRecommendationsResponse: """ @@ -1837,13 +1908,18 @@ def search_recommend_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - search_recommend_rules_params: Optional[SearchRecommendRulesParams] = None, + search_recommend_rules_params: Union[ + Optional[SearchRecommendRulesParams], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1882,7 +1958,7 @@ def search_recommend_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{model}", quote(str(model), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -1894,13 +1970,18 @@ def search_recommend_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - model: Annotated[ - RecommendModels, - Field( - description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). " - ), + model: Union[ + Annotated[ + RecommendModels, + Field( + description="[Recommend model](https://www.algolia.com/doc/guides/algolia-recommend/overview/#recommend-models). 
" + ), + ], + str, ], - search_recommend_rules_params: Optional[SearchRecommendRulesParams] = None, + search_recommend_rules_params: Union[ + Optional[SearchRecommendRulesParams], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRecommendRulesResponse: """ diff --git a/algoliasearch/recommend/config.py b/algoliasearch/recommend/config.py index 77268702e..ddcba84f8 100644 --- a/algoliasearch/recommend/config.py +++ b/algoliasearch/recommend/config.py @@ -1,4 +1,5 @@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import CallType, Host, HostsCollection @@ -6,11 +7,14 @@ class RecommendConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str) -> None: + def __init__(self, app_id: Optional[str], api_key: Optional[str]) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Recommend") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." + self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, diff --git a/algoliasearch/recommend/models/around_precision.py b/algoliasearch/recommend/models/around_precision.py index 20778614b..eca877706 100644 --- a/algoliasearch/recommend/models/around_precision.py +++ b/algoliasearch/recommend/models/around_precision.py @@ -30,7 +30,7 @@ class AroundPrecision(BaseModel): """ Distance in meters to group results by similar distances. For example, if you set `aroundPrecision` to 100, records wihin 100 meters to the central coordinate are considered to have the same distance, as are records between 100 and 199 meters. 
""" oneof_schema_2_validator: Optional[List[Range]] = Field(default=None) - actual_instance: Optional[Union[List[Range], int]] = None + actual_instance: Union[List[Range], int, None] = None one_of_schemas: Set[str] = {"List[Range]", "int"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[Range], int]]: + def unwrap_actual_instance(self) -> Union[List[Range], int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -91,9 +91,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -103,8 +103,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[Range], int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/around_radius.py b/algoliasearch/recommend/models/around_radius.py index 5b993cf35..508f57c9c 100644 --- a/algoliasearch/recommend/models/around_radius.py +++ b/algoliasearch/recommend/models/around_radius.py @@ -30,7 +30,7 @@ class AroundRadius(BaseModel): """ Maximum search radius around a central location in meters. 
""" oneof_schema_2_validator: Optional[AroundRadiusAll] = Field(default=None) - actual_instance: Optional[Union[AroundRadiusAll, int]] = None + actual_instance: Union[AroundRadiusAll, int, None] = None one_of_schemas: Set[str] = {"AroundRadiusAll", "int"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[AroundRadiusAll, int]]: + def unwrap_actual_instance(self) -> Union[AroundRadiusAll, int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], AroundRadiusAll, int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/auto_facet_filter.py b/algoliasearch/recommend/models/auto_facet_filter.py index 0f95582fc..06b49f4a7 100644 --- a/algoliasearch/recommend/models/auto_facet_filter.py +++ b/algoliasearch/recommend/models/auto_facet_filter.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, 
ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "facet": "facet", + "negative": "negative", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AutoFacetFilter(BaseModel): """ Facet attribute. Only recommendations with the same value (or only recommendations with a different value) as the original viewed item are included. """ - facet: Optional[str] = Field(default=None, alias="facet") + facet: Optional[str] = None """ Facet attribute. """ - negative: Optional[bool] = Field(default=None, alias="negative") + negative: Optional[bool] = None """ Whether the filter is negative. If true, recommendations must not have the same value for the `facet` attribute. If false, recommendations must have the same value for the `facet` attribute. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class AutoFacetFilter(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/banner.py b/algoliasearch/recommend/models/banner.py new file mode 100644 index 000000000..3abcadb71 --- /dev/null +++ b/algoliasearch/recommend/models/banner.py @@ -0,0 +1,84 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. 
+""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.recommend.models.banner_image import BannerImage +from algoliasearch.recommend.models.banner_link import BannerLink + +_ALIASES = { + "image": "image", + "link": "link", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Banner(BaseModel): + """ + a search banner with image and url. + """ + + image: Optional[BannerImage] = None + link: Optional[BannerLink] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Banner from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Banner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["image"] = ( + BannerImage.from_dict(obj["image"]) + if obj.get("image") is not None + else None + ) + obj["link"] = ( + BannerLink.from_dict(obj["link"]) if obj.get("link") is not None else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/recommend/models/banner_image.py b/algoliasearch/recommend/models/banner_image.py new file mode 100644 index 000000000..2b960bd56 --- 
/dev/null +++ b/algoliasearch/recommend/models/banner_image.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.recommend.models.banner_image_url import BannerImageUrl + +_ALIASES = { + "urls": "urls", + "title": "title", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerImage(BaseModel): + """ + image of a search banner. + """ + + urls: Optional[BannerImageUrl] = None + title: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerImage from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerImage from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["urls"] = ( + BannerImageUrl.from_dict(obj["urls"]) + if obj.get("urls") is not None + else None + ) + + return cls.model_validate(obj) diff --git 
a/algoliasearch/recommend/models/banner_image_url.py b/algoliasearch/recommend/models/banner_image_url.py new file mode 100644 index 000000000..630f01a1f --- /dev/null +++ b/algoliasearch/recommend/models/banner_image_url.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerImageUrl(BaseModel): + """ + url for a search banner image. + """ + + url: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerImageUrl from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerImageUrl from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + return cls.model_validate(obj) diff --git a/algoliasearch/recommend/models/banner_link.py b/algoliasearch/recommend/models/banner_link.py new 
file mode 100644 index 000000000..d1edada9a --- /dev/null +++ b/algoliasearch/recommend/models/banner_link.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerLink(BaseModel): + """ + link for a banner defined in merchandising studio. + """ + + url: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerLink from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerLink from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + return cls.model_validate(obj) diff --git a/algoliasearch/recommend/models/banners.py b/algoliasearch/recommend/models/banners.py new file mode 100644 index 000000000..83ea01f02 --- /dev/null +++ b/algoliasearch/recommend/models/banners.py @@ -0,0 
+1,76 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.recommend.models.banner import Banner + +_ALIASES = { + "banners": "banners", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Banners(BaseModel): + """ + banners defined in the merchandising studio for the given search. + """ + + banners: Optional[Banner] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Banners from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Banners from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["banners"] = ( + Banner.from_dict(obj["banners"]) if obj.get("banners") is not None else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/recommend/models/bought_together_query.py b/algoliasearch/recommend/models/bought_together_query.py index 
937b7eaba..407609aa4 100644 --- a/algoliasearch/recommend/models/bought_together_query.py +++ b/algoliasearch/recommend/models/bought_together_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,23 +21,34 @@ from algoliasearch.recommend.models.fbt_model import FbtModel from algoliasearch.recommend.models.recommend_search_params import RecommendSearchParams +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", + "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "model": "model", + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BoughtTogetherQuery(BaseModel): """ BoughtTogetherQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. """ - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. """ - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - model: FbtModel = Field(alias="model") - object_id: str = Field(alias="objectID") + query_parameters: Optional[RecommendSearchParams] = None + model: FbtModel + object_id: str """ Unique record identifier. 
""" model_config = ConfigDict( @@ -45,6 +56,7 @@ class BoughtTogetherQuery(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/condition.py b/algoliasearch/recommend/models/condition.py index 2e8b8b739..7719c36a6 100644 --- a/algoliasearch/recommend/models/condition.py +++ b/algoliasearch/recommend/models/condition.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -19,14 +19,24 @@ from typing_extensions import Self +_ALIASES = { + "filters": "filters", + "context": "context", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Condition(BaseModel): """ Condition that triggers the rule. If not specified, the rule is triggered for all recommendations. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. 
**Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - context: Optional[str] = Field(default=None, alias="context") + context: Optional[str] = None """ An additional restriction that only triggers the rule, when the search has the same value as `ruleContexts` parameter. For example, if `context: mobile`, the rule is only triggered when the search request has a matching `ruleContexts: mobile`. A rule context must only contain alphanumeric characters. """ @field_validator("context") @@ -44,6 +54,7 @@ def context_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/consequence.py b/algoliasearch/recommend/models/consequence.py index 98168e52c..326f9bb19 100644 --- a/algoliasearch/recommend/models/consequence.py +++ b/algoliasearch/recommend/models/consequence.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -24,25 +24,34 @@ PromoteConsequenceObject, ) +_ALIASES = { + "hide": "hide", + "promote": "promote", + "params": "params", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Consequence(BaseModel): """ Effect of the rule. 
""" - hide: Optional[List[HideConsequenceObject]] = Field(default=None, alias="hide") + hide: Optional[List[HideConsequenceObject]] = None """ Exclude items from recommendations. """ - promote: Optional[List[PromoteConsequenceObject]] = Field( - default=None, alias="promote" - ) + promote: Optional[List[PromoteConsequenceObject]] = None """ Place items at specific positions in the list of recommendations. """ - params: Optional[ParamsConsequence] = Field(default=None, alias="params") + params: Optional[ParamsConsequence] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/deleted_at_response.py b/algoliasearch/recommend/models/deleted_at_response.py index 5de986f76..c720da065 100644 --- a/algoliasearch/recommend/models/deleted_at_response.py +++ b/algoliasearch/recommend/models/deleted_at_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeletedAtResponse(BaseModel): """ Response, taskID, and deletion timestamp. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - deleted_at: str = Field(alias="deletedAt") + deleted_at: str """ Date and time when the object was deleted, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class DeletedAtResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/distinct.py b/algoliasearch/recommend/models/distinct.py index fcc03353d..6f375f0b4 100644 --- a/algoliasearch/recommend/models/distinct.py +++ b/algoliasearch/recommend/models/distinct.py @@ -27,7 +27,7 @@ class Distinct(BaseModel): """ Whether deduplication is turned on. If true, only one member of a group is shown in the search results. """ oneof_schema_2_validator: Optional[int] = Field(default=None) """ Number of members of a group of records to include in the search results. - Don't use `distinct > 1` for records that might be [promoted by rules](https://www.algolia.com/doc/guides/managing-results/rules/merchandising-and-promoting/how-to/promote-hits/). The number of hits won't be correct and faceting won't work as expected. - With `distinct > 1`, the `hitsPerPage` parameter controls the number of returned groups. For example, with `hitsPerPage: 10` and `distinct: 2`, up to 20 records are returned. Likewise, the `nbHits` response attribute contains the number of returned groups. """ - actual_instance: Optional[Union[bool, int]] = None + actual_instance: Union[bool, int, None] = None one_of_schemas: Set[str] = {"bool", "int"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[bool, int]]: + def unwrap_actual_instance(self) -> Union[bool, int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], bool, int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/error_base.py b/algoliasearch/recommend/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/recommend/models/error_base.py +++ b/algoliasearch/recommend/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. 
""" - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/recommend/models/exhaustive.py b/algoliasearch/recommend/models/exhaustive.py index 1b7e3b403..36b73d024 100644 --- a/algoliasearch/recommend/models/exhaustive.py +++ b/algoliasearch/recommend/models/exhaustive.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,20 +18,33 @@ from typing_extensions import Self +_ALIASES = { + "facets_count": "facetsCount", + "facet_values": "facetValues", + "nb_hits": "nbHits", + "rules_match": "rulesMatch", + "typo": "typo", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Exhaustive(BaseModel): """ Whether certain properties of the search response are calculated exhaustive (exact) or approximated. """ - facets_count: Optional[bool] = Field(default=None, alias="facetsCount") + facets_count: Optional[bool] = None """ Whether the facet count is exhaustive (`true`) or approximate (`false`). See the [related discussion](https://support.algolia.com/hc/en-us/articles/4406975248145-Why-are-my-facet-and-hit-counts-not-accurate-). """ - facet_values: Optional[bool] = Field(default=None, alias="facetValues") + facet_values: Optional[bool] = None """ The value is `false` if not all facet values are retrieved. """ - nb_hits: Optional[bool] = Field(default=None, alias="nbHits") + nb_hits: Optional[bool] = None """ Whether the `nbHits` is exhaustive (`true`) or approximate (`false`). When the query takes more than 50ms to be processed, the engine makes an approximation. 
This can happen when using complex filters on millions of records, when typo-tolerance was not exhaustive, or when enough hits have been retrieved (for example, after the engine finds 10,000 exact matches). `nbHits` is reported as non-exhaustive whenever an approximation is made, even if the approximation didn’t, in the end, impact the exhaustivity of the query. """ - rules_match: Optional[bool] = Field(default=None, alias="rulesMatch") + rules_match: Optional[bool] = None """ Rules matching exhaustivity. The value is `false` if rules were enable for this query, and could not be fully processed due a timeout. This is generally caused by the number of alternatives (such as typos) which is too large. """ - typo: Optional[bool] = Field(default=None, alias="typo") + typo: Optional[bool] = None """ Whether the typo search was exhaustive (`true`) or approximate (`false`). An approximation is done when the typo search query part takes more than 10% of the query budget (ie. 5ms by default) to be processed (this can happen when a lot of typo alternatives exist for the query). This field will not be included when typo-tolerance is entirely disabled. 
""" model_config = ConfigDict( @@ -39,6 +52,7 @@ class Exhaustive(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/facet_filters.py b/algoliasearch/recommend/models/facet_filters.py index 9669c5fb0..e31fc57d6 100644 --- a/algoliasearch/recommend/models/facet_filters.py +++ b/algoliasearch/recommend/models/facet_filters.py @@ -27,7 +27,7 @@ class FacetFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[FacetFilters], str]] = None + actual_instance: Union[List[FacetFilters], str, None] = None one_of_schemas: Set[str] = {"List[FacetFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[FacetFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[FacetFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[FacetFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/facet_ordering.py b/algoliasearch/recommend/models/facet_ordering.py index 99d8bf4f0..c66878e9b 100644 --- a/algoliasearch/recommend/models/facet_ordering.py +++ b/algoliasearch/recommend/models/facet_ordering.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,14 +21,23 @@ from algoliasearch.recommend.models.index_settings_facets import IndexSettingsFacets from algoliasearch.recommend.models.value import Value +_ALIASES = { + "facets": "facets", + "values": "values", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class FacetOrdering(BaseModel): """ Order of facet names and facet values in your UI. """ - facets: Optional[IndexSettingsFacets] = Field(default=None, alias="facets") - values: Optional[Dict[str, Value]] = Field(default=None, alias="values") + facets: Optional[IndexSettingsFacets] = None + values: Optional[Dict[str, Value]] = None """ Order of facet values. One object for each facet. 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class FacetOrdering(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/facet_stats.py b/algoliasearch/recommend/models/facet_stats.py index ada778720..d59c834b9 100644 --- a/algoliasearch/recommend/models/facet_stats.py +++ b/algoliasearch/recommend/models/facet_stats.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "min": "min", + "max": "max", + "avg": "avg", + "sum": "sum", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class FacetStats(BaseModel): """ FacetStats """ - min: Optional[float] = Field(default=None, alias="min") + min: Optional[float] = None """ Minimum value in the results. """ - max: Optional[float] = Field(default=None, alias="max") + max: Optional[float] = None """ Maximum value in the results. """ - avg: Optional[float] = Field(default=None, alias="avg") + avg: Optional[float] = None """ Average facet value in the results. """ - sum: Optional[float] = Field(default=None, alias="sum") + sum: Optional[float] = None """ Sum of all values in the results. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class FacetStats(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/fallback_params.py b/algoliasearch/recommend/models/fallback_params.py index 5f8d24c48..02caad55e 100644 --- a/algoliasearch/recommend/models/fallback_params.py +++ b/algoliasearch/recommend/models/fallback_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -43,271 +43,266 @@ from algoliasearch.recommend.models.tag_filters import TagFilters from algoliasearch.recommend.models.typo_tolerance import TypoTolerance +_ALIASES = { + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", + "enable_ab_test": "enableABTest", 
+ "query": "query", + "attributes_for_faceting": "attributesForFaceting", + "replicas": "replicas", + "pagination_limited_to": "paginationLimitedTo", + "unretrievable_attributes": "unretrievableAttributes", + "disable_typo_tolerance_on_words": "disableTypoToleranceOnWords", + "attributes_to_transliterate": "attributesToTransliterate", + "camel_case_attributes": "camelCaseAttributes", + "decompounded_attributes": "decompoundedAttributes", + "index_languages": "indexLanguages", + "disable_prefix_on_attributes": "disablePrefixOnAttributes", + "allow_compression_of_integer_array": "allowCompressionOfIntegerArray", + "numeric_attributes_for_filtering": "numericAttributesForFiltering", + "separators_to_index": "separatorsToIndex", + "searchable_attributes": "searchableAttributes", + "user_data": "userData", + "custom_normalization": "customNormalization", + "attribute_for_distinct": "attributeForDistinct", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "advanced_syntax": 
"advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class FallbackParams(BaseModel): """ FallbackParams """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). 
- **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). 
""" - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. 
""" - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. """ - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. 
""" - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. [Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. 
""" - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. """ - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - attributes_for_faceting: Optional[List[str]] = Field( - default=None, alias="attributesForFaceting" - ) + attributes_for_faceting: Optional[List[str]] = None """ Attributes used for [faceting](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/). Facets are attributes that let you categorize search results. They can be used for filtering search results. By default, no attribute is used for faceting. Attribute names are case-sensitive. 
**Modifiers** - `filterOnly(\"ATTRIBUTE\")`. Allows using this attribute as a filter, but doesn't evalue the facet values. - `searchable(\"ATTRIBUTE\")`. Allows searching for facet values. - `afterDistinct(\"ATTRIBUTE\")`. Evaluates the facet count _after_ deduplication with `distinct`. This ensures accurate facet counts. You can apply this modifier to searchable facets: `afterDistinct(searchable(ATTRIBUTE))`. """ - replicas: Optional[List[str]] = Field(default=None, alias="replicas") + replicas: Optional[List[str]] = None """ Creates [replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/). Replicas are copies of a primary index with the same records but different settings, synonyms, or rules. If you want to offer a different ranking or sorting of your search results, you'll use replica indices. All index operations on a primary index are automatically forwarded to its replicas. To add a replica index, you must provide the complete set of replicas to this parameter. If you omit a replica from this list, the replica turns into a regular, standalone index that will no longer by synced with the primary index. **Modifier** - `virtual(\"REPLICA\")`. Create a virtual replica, Virtual replicas don't increase the number of records and are optimized for [Relevant sorting](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/relevant-sort/). """ - pagination_limited_to: Optional[int] = Field( - default=None, alias="paginationLimitedTo" - ) + pagination_limited_to: Optional[int] = None """ Maximum number of search results that can be obtained through pagination. Higher pagination limits might slow down your search. For pagination limits above 1,000, the sorting of results beyond the 1,000th hit can't be guaranteed. 
""" - unretrievable_attributes: Optional[List[str]] = Field( - default=None, alias="unretrievableAttributes" - ) + unretrievable_attributes: Optional[List[str]] = None """ Attributes that can't be retrieved at query time. This can be useful if you want to use an attribute for ranking or to [restrict access](https://www.algolia.com/doc/guides/security/api-keys/how-to/user-restricted-access-to-data/), but don't want to include it in the search results. Attribute names are case-sensitive. """ - disable_typo_tolerance_on_words: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnWords" - ) + disable_typo_tolerance_on_words: Optional[List[str]] = None """ Words for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). This also turns off [word splitting and concatenation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/splitting-and-concatenation/) for the specified words. """ - attributes_to_transliterate: Optional[List[str]] = Field( - default=None, alias="attributesToTransliterate" - ) + attributes_to_transliterate: Optional[List[str]] = None """ Attributes, for which you want to support [Japanese transliteration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#japanese-transliteration-and-type-ahead). Transliteration supports searching in any of the Japanese writing systems. To support transliteration, you must set the indexing language to Japanese. Attribute names are case-sensitive. """ - camel_case_attributes: Optional[List[str]] = Field( - default=None, alias="camelCaseAttributes" - ) + camel_case_attributes: Optional[List[str]] = None """ Attributes for which to split [camel case](https://wikipedia.org/wiki/Camel_case) words. Attribute names are case-sensitive. 
""" - decompounded_attributes: Optional[object] = Field( - default=None, alias="decompoundedAttributes" - ) + decompounded_attributes: Optional[object] = None """ Searchable attributes to which Algolia should apply [word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/how-to/customize-segmentation/) (decompounding). Attribute names are case-sensitive. Compound words are formed by combining two or more individual words, and are particularly prevalent in Germanic languages—for example, \"firefighter\". With decompounding, the individual components are indexed separately. You can specify different lists for different languages. Decompounding is supported for these languages: Dutch (`nl`), German (`de`), Finnish (`fi`), Danish (`da`), Swedish (`sv`), and Norwegian (`no`). Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - index_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="indexLanguages" - ) + index_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific processing steps, such as word detection and dictionary settings. **You should always specify an indexing language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). 
""" - disable_prefix_on_attributes: Optional[List[str]] = Field( - default=None, alias="disablePrefixOnAttributes" - ) + disable_prefix_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to turn off [prefix matching](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/#adjusting-prefix-search). Attribute names are case-sensitive. """ - allow_compression_of_integer_array: Optional[bool] = Field( - default=None, alias="allowCompressionOfIntegerArray" - ) + allow_compression_of_integer_array: Optional[bool] = None """ Whether arrays with exclusively non-negative integers should be compressed for better performance. If true, the compressed arrays may be reordered. """ - numeric_attributes_for_filtering: Optional[List[str]] = Field( - default=None, alias="numericAttributesForFiltering" - ) + numeric_attributes_for_filtering: Optional[List[str]] = None """ Numeric attributes that can be used as [numerical filters](https://www.algolia.com/doc/guides/managing-results/rules/detecting-intent/how-to/applying-a-custom-filter-for-a-specific-query/#numerical-filters). Attribute names are case-sensitive. By default, all numeric attributes are available as numerical filters. For faster indexing, reduce the number of numeric attributes. If you want to turn off filtering for all numeric attributes, specifiy an attribute that doesn't exist in your index, such as `NO_NUMERIC_FILTERING`. **Modifier** - `equalOnly(\"ATTRIBUTE\")`. Support only filtering based on equality comparisons `=` and `!=`. """ - separators_to_index: Optional[str] = Field(default=None, alias="separatorsToIndex") + separators_to_index: Optional[str] = None """ Controls which separators are indexed. Separators are all non-letter characters except spaces and currency characters, such as $€£¥. By default, separator characters aren't indexed. With `separatorsToIndex`, Algolia treats separator characters as separate words. 
For example, a search for `C#` would report two matches. """ - searchable_attributes: Optional[List[str]] = Field( - default=None, alias="searchableAttributes" - ) + searchable_attributes: Optional[List[str]] = None """ Attributes used for searching. Attribute names are case-sensitive. By default, all attributes are searchable and the [Attribute](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#attribute) ranking criterion is turned off. With a non-empty list, Algolia only returns results with matches in the selected attributes. In addition, the Attribute ranking criterion is turned on: matches in attributes that are higher in the list of `searchableAttributes` rank first. To make matches in two attributes rank equally, include them in a comma-separated string, such as `\"title,alternate_title\"`. Attributes with the same priority are always unordered. For more information, see [Searchable attributes](https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/how-to/setting-searchable-attributes/). **Modifier** - `unordered(\"ATTRIBUTE\")`. Ignore the position of a match within the attribute. Without modifier, matches at the beginning of an attribute rank higer than matches at the end. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - custom_normalization: Optional[Dict[str, Dict[str, str]]] = Field( - default=None, alias="customNormalization" - ) + custom_normalization: Optional[Dict[str, Dict[str, str]]] = None """ Characters and their normalized replacements. This overrides Algolia's default [normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/). 
""" - attribute_for_distinct: Optional[str] = Field( - default=None, alias="attributeForDistinct" - ) + attribute_for_distinct: Optional[str] = None """ Attribute that should be used to establish groups of results. Attribute names are case-sensitive. All records with the same value for this attribute are considered a group. You can combine `attributeForDistinct` with the `distinct` search parameter to control how many items per group are included in the search results. If you want to use the same attribute also for faceting, use the `afterDistinct` modifier of the `attributesForFaceting` setting. This applies faceting _after_ deduplication, which will result in accurate facet counts. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. 
**Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. 
If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. 
For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. """ - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. 
For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. - If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. 
""" - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. 
For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. 
""" - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). """ - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/get_recommend_task_response.py b/algoliasearch/recommend/models/get_recommend_task_response.py index 9b8ef23f6..ffd57edd7 100644 --- a/algoliasearch/recommend/models/get_recommend_task_response.py +++ b/algoliasearch/recommend/models/get_recommend_task_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.recommend.models.task_status import TaskStatus +_ALIASES = { + "status": "status", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetRecommendTaskResponse(BaseModel): """ GetRecommendTaskResponse """ - status: TaskStatus = Field(alias="status") + status: TaskStatus model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/get_recommendations_params.py b/algoliasearch/recommend/models/get_recommendations_params.py index f275f2885..11a28fe98 100644 --- a/algoliasearch/recommend/models/get_recommendations_params.py +++ b/algoliasearch/recommend/models/get_recommendations_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ 
-22,13 +22,21 @@ RecommendationsRequest, ) +_ALIASES = { + "requests": "requests", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetRecommendationsParams(BaseModel): """ Recommend request body. """ - requests: List[RecommendationsRequest] = Field(alias="requests") + requests: List[RecommendationsRequest] """ Recommendation request with parameters depending on the requested model. """ model_config = ConfigDict( @@ -36,6 +44,7 @@ class GetRecommendationsParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/get_recommendations_response.py b/algoliasearch/recommend/models/get_recommendations_response.py index a2cbc3597..7ddcdbd69 100644 --- a/algoliasearch/recommend/models/get_recommendations_response.py +++ b/algoliasearch/recommend/models/get_recommendations_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,19 +22,28 @@ RecommendationsResults, ) +_ALIASES = { + "results": "results", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetRecommendationsResponse(BaseModel): """ GetRecommendationsResponse """ - results: List[RecommendationsResults] = Field(alias="results") + results: List[RecommendationsResults] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/hide_consequence_object.py b/algoliasearch/recommend/models/hide_consequence_object.py index a0cf7dc77..90d1439bd 100644 --- a/algoliasearch/recommend/models/hide_consequence_object.py +++ 
b/algoliasearch/recommend/models/hide_consequence_object.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class HideConsequenceObject(BaseModel): """ Object ID of the recommendation you want to exclude. """ - object_id: Optional[str] = Field(default=None, alias="objectID") + object_id: Optional[str] = None """ Unique record identifier. """ model_config = ConfigDict( @@ -31,6 +40,7 @@ class HideConsequenceObject(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/highlight_result.py b/algoliasearch/recommend/models/highlight_result.py index d65f9507b..700bc0ddd 100644 --- a/algoliasearch/recommend/models/highlight_result.py +++ b/algoliasearch/recommend/models/highlight_result.py @@ -32,8 +32,8 @@ class HighlightResult(BaseModel): """ Surround words that match the query with HTML tags for highlighting. """ oneof_schema_3_validator: Optional[List[HighlightResult]] = Field(default=None) """ Surround words that match the query with HTML tags for highlighting. """ - actual_instance: Optional[ - Union[Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult]] + actual_instance: Union[ + Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult], None ] = None one_of_schemas: Set[str] = { "Dict[str, HighlightResult]", @@ -51,15 +51,19 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult]] + ) -> Union[ + Dict[str, HighlightResult], + HighlightResultOption, + List[HighlightResult], + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. @@ -109,9 +113,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -130,8 +134,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/highlight_result_option.py b/algoliasearch/recommend/models/highlight_result_option.py index 635f52ae4..e7c867cb0 100644 --- a/algoliasearch/recommend/models/highlight_result_option.py +++ b/algoliasearch/recommend/models/highlight_result_option.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.recommend.models.match_level import MatchLevel +_ALIASES = { + "value": "value", + "match_level": "matchLevel", + "matched_words": "matchedWords", + "fully_highlighted": "fullyHighlighted", +} + + +def _alias_generator(name: str) -> str: + return 
_ALIASES.get(name, name) + class HighlightResultOption(BaseModel): """ Surround words that match the query with HTML tags for highlighting. """ - value: str = Field(alias="value") + value: str """ Highlighted attribute value, including HTML tags. """ - match_level: MatchLevel = Field(alias="matchLevel") - matched_words: List[str] = Field(alias="matchedWords") + match_level: MatchLevel + matched_words: List[str] """ List of matched words from the search query. """ - fully_highlighted: Optional[bool] = Field(default=None, alias="fullyHighlighted") + fully_highlighted: Optional[bool] = None """ Whether the entire attribute value is highlighted. """ model_config = ConfigDict( @@ -39,6 +50,7 @@ class HighlightResultOption(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/ignore_plurals.py b/algoliasearch/recommend/models/ignore_plurals.py index 572f7b396..57a12a386 100644 --- a/algoliasearch/recommend/models/ignore_plurals.py +++ b/algoliasearch/recommend/models/ignore_plurals.py @@ -33,9 +33,7 @@ class IgnorePlurals(BaseModel): oneof_schema_3_validator: Optional[bool] = Field(default=None) """ If true, `ignorePlurals` is active for all languages included in `queryLanguages`, or for all supported languages, if `queryLanguges` is empty. If false, singulars, plurals, and other declensions won't be considered equivalent. """ - actual_instance: Optional[Union[BooleanString, List[SupportedLanguage], bool]] = ( - None - ) + actual_instance: Union[BooleanString, List[SupportedLanguage], bool, None] = None one_of_schemas: Set[str] = {"BooleanString", "List[SupportedLanguage]", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -48,14 +46,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[BooleanString, List[SupportedLanguage], bool]]: + ) -> Union[BooleanString, List[SupportedLanguage], bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -104,9 +102,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -118,8 +116,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/index_settings_facets.py b/algoliasearch/recommend/models/index_settings_facets.py index 212bc1b81..71b52af93 100644 --- a/algoliasearch/recommend/models/index_settings_facets.py +++ b/algoliasearch/recommend/models/index_settings_facets.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "order": "order", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class IndexSettingsFacets(BaseModel): """ Order of facet names. 
""" - order: Optional[List[str]] = Field(default=None, alias="order") + order: Optional[List[str]] = None """ Explicit order of facets or facet values. This setting lets you always show specific facets or facet values at the top of the list. """ model_config = ConfigDict( @@ -31,6 +40,7 @@ class IndexSettingsFacets(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/looking_similar_query.py b/algoliasearch/recommend/models/looking_similar_query.py index 580cfb606..6740eb238 100644 --- a/algoliasearch/recommend/models/looking_similar_query.py +++ b/algoliasearch/recommend/models/looking_similar_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,33 +22,44 @@ from algoliasearch.recommend.models.looking_similar_model import LookingSimilarModel from algoliasearch.recommend.models.recommend_search_params import RecommendSearchParams +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", + "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "model": "model", + "object_id": "objectID", + "fallback_parameters": "fallbackParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class LookingSimilarQuery(BaseModel): """ LookingSimilarQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. 
""" - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. """ - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - model: LookingSimilarModel = Field(alias="model") - object_id: str = Field(alias="objectID") + query_parameters: Optional[RecommendSearchParams] = None + model: LookingSimilarModel + object_id: str """ Unique record identifier. """ - fallback_parameters: Optional[FallbackParams] = Field( - default=None, alias="fallbackParameters" - ) + fallback_parameters: Optional[FallbackParams] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/matched_geo_location.py b/algoliasearch/recommend/models/matched_geo_location.py index b5de4a639..5384403b5 100644 --- a/algoliasearch/recommend/models/matched_geo_location.py +++ b/algoliasearch/recommend/models/matched_geo_location.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "lat": "lat", + "lng": "lng", + "distance": "distance", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class MatchedGeoLocation(BaseModel): """ MatchedGeoLocation """ - lat: Optional[float] = Field(default=None, alias="lat") + lat: Optional[float] = None """ Latitude of the 
matched location. """ - lng: Optional[float] = Field(default=None, alias="lng") + lng: Optional[float] = None """ Longitude of the matched location. """ - distance: Optional[int] = Field(default=None, alias="distance") + distance: Optional[int] = None """ Distance between the matched location and the search location (in meters). """ model_config = ConfigDict( @@ -35,6 +46,7 @@ class MatchedGeoLocation(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/numeric_filters.py b/algoliasearch/recommend/models/numeric_filters.py index bd805e7e8..417b7f4d3 100644 --- a/algoliasearch/recommend/models/numeric_filters.py +++ b/algoliasearch/recommend/models/numeric_filters.py @@ -27,7 +27,7 @@ class NumericFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[NumericFilters], str]] = None + actual_instance: Union[List[NumericFilters], str, None] = None one_of_schemas: Set[str] = {"List[NumericFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[NumericFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[NumericFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[NumericFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/optional_filters.py b/algoliasearch/recommend/models/optional_filters.py index 08edec6fb..30f02effb 100644 --- a/algoliasearch/recommend/models/optional_filters.py +++ b/algoliasearch/recommend/models/optional_filters.py @@ -27,7 +27,7 @@ class OptionalFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[OptionalFilters], str]] = None + actual_instance: Union[List[OptionalFilters], str, None] = None one_of_schemas: Set[str] = {"List[OptionalFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[OptionalFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[OptionalFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[OptionalFilters], str]] return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/params_consequence.py b/algoliasearch/recommend/models/params_consequence.py index 4151f170d..c475905f5 100644 --- a/algoliasearch/recommend/models/params_consequence.py +++ b/algoliasearch/recommend/models/params_consequence.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,27 @@ from algoliasearch.recommend.models.auto_facet_filter import AutoFacetFilter +_ALIASES = { + "automatic_facet_filters": "automaticFacetFilters", + "filters": "filters", + "optional_filters": "optionalFilters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ParamsConsequence(BaseModel): """ Filter or boost recommendations matching a facet filter. 
""" - automatic_facet_filters: Optional[List[AutoFacetFilter]] = Field( - default=None, alias="automaticFacetFilters" - ) + automatic_facet_filters: Optional[List[AutoFacetFilter]] = None """ Filter recommendations that match or don't match the same `facet:facet_value` combination as the viewed item. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - optional_filters: Optional[List[str]] = Field(default=None, alias="optionalFilters") + optional_filters: Optional[List[str]] = None """ Filters to promote or demote records in the search results. Optional filters work like facet filters, but they don't exclude records from the search results. 
Records that match the optional filter rank before records that don't match. Matches with higher weights (``) rank before matches with lower weights. If you're using a negative filter `facet:-value`, matching records rank after records that don't match. """ model_config = ConfigDict( @@ -40,6 +48,7 @@ class ParamsConsequence(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/personalization.py b/algoliasearch/recommend/models/personalization.py index a6dcf022c..b04703d41 100644 --- a/algoliasearch/recommend/models/personalization.py +++ b/algoliasearch/recommend/models/personalization.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "filters_score": "filtersScore", + "ranking_score": "rankingScore", + "score": "score", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Personalization(BaseModel): """ Personalization """ - filters_score: Optional[int] = Field(default=None, alias="filtersScore") + filters_score: Optional[int] = None """ The score of the filters. """ - ranking_score: Optional[int] = Field(default=None, alias="rankingScore") + ranking_score: Optional[int] = None """ The score of the ranking. """ - score: Optional[int] = Field(default=None, alias="score") + score: Optional[int] = None """ The score of the event. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class Personalization(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/promote_consequence_object.py b/algoliasearch/recommend/models/promote_consequence_object.py index 7edbfda57..1a3cdfbd2 100644 --- a/algoliasearch/recommend/models/promote_consequence_object.py +++ b/algoliasearch/recommend/models/promote_consequence_object.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", + "position": "position", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class PromoteConsequenceObject(BaseModel): """ Object ID and position of the recommendation you want to pin. """ - object_id: Optional[str] = Field(default=None, alias="objectID") + object_id: Optional[str] = None """ Unique record identifier. """ - position: Optional[int] = Field(default=None, alias="position") + position: Optional[int] = None """ Index in the list of recommendations where to place this item. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class PromoteConsequenceObject(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/range.py b/algoliasearch/recommend/models/range.py index 4cda204fa..afbbc148b 100644 --- a/algoliasearch/recommend/models/range.py +++ b/algoliasearch/recommend/models/range.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_from": "from", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Range(BaseModel): """ Range object with lower and upper values in meters to define custom ranges. """ - var_from: Optional[int] = Field(default=None, alias="from") + var_from: Optional[int] = None """ Lower boundary of a range in meters. The Geo ranking criterion considers all records within the range to be equal. """ - value: Optional[int] = Field(default=None, alias="value") + value: Optional[int] = None """ Upper boundary of a range in meters. The Geo ranking criterion considers all records within the range to be equal. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class Range(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/ranking_info.py b/algoliasearch/recommend/models/ranking_info.py index 769876ae7..086004298 100644 --- a/algoliasearch/recommend/models/ranking_info.py +++ b/algoliasearch/recommend/models/ranking_info.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,41 +21,55 @@ from algoliasearch.recommend.models.matched_geo_location import MatchedGeoLocation from algoliasearch.recommend.models.personalization import Personalization +_ALIASES = { + "filters": "filters", + "first_matched_word": "firstMatchedWord", + "geo_distance": "geoDistance", + "geo_precision": "geoPrecision", + "matched_geo_location": "matchedGeoLocation", + "personalization": "personalization", + "nb_exact_words": "nbExactWords", + "nb_typos": "nbTypos", + "promoted": "promoted", + "proximity_distance": "proximityDistance", + "user_score": "userScore", + "words": "words", + "promoted_by_re_ranking": "promotedByReRanking", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RankingInfo(BaseModel): """ Object with detailed information about the record's ranking. """ - filters: Optional[int] = Field(default=None, alias="filters") + filters: Optional[int] = None """ Whether a filter matched the query. """ - first_matched_word: int = Field(alias="firstMatchedWord") + first_matched_word: int """ Position of the first matched word in the best matching attribute of the record. 
""" - geo_distance: int = Field(alias="geoDistance") + geo_distance: int """ Distance between the geo location in the search query and the best matching geo location in the record, divided by the geo precision (in meters). """ - geo_precision: Optional[int] = Field(default=None, alias="geoPrecision") + geo_precision: Optional[int] = None """ Precision used when computing the geo distance, in meters. """ - matched_geo_location: Optional[MatchedGeoLocation] = Field( - default=None, alias="matchedGeoLocation" - ) - personalization: Optional[Personalization] = Field( - default=None, alias="personalization" - ) - nb_exact_words: int = Field(alias="nbExactWords") + matched_geo_location: Optional[MatchedGeoLocation] = None + personalization: Optional[Personalization] = None + nb_exact_words: int """ Number of exactly matched words. """ - nb_typos: int = Field(alias="nbTypos") + nb_typos: int """ Number of typos encountered when matching the record. """ - promoted: Optional[bool] = Field(default=None, alias="promoted") + promoted: Optional[bool] = None """ Whether the record was promoted by a rule. """ - proximity_distance: Optional[int] = Field(default=None, alias="proximityDistance") + proximity_distance: Optional[int] = None """ Number of words between multiple matches in the query plus 1. For single word queries, `proximityDistance` is 0. """ - user_score: int = Field(alias="userScore") + user_score: int """ Overall ranking of the record, expressed as a single integer. This attribute is internal. """ - words: Optional[int] = Field(default=None, alias="words") + words: Optional[int] = None """ Number of matched words. """ - promoted_by_re_ranking: Optional[bool] = Field( - default=None, alias="promotedByReRanking" - ) + promoted_by_re_ranking: Optional[bool] = None """ Whether the record is re-ranked. 
""" model_config = ConfigDict( @@ -63,6 +77,7 @@ class RankingInfo(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/re_ranking_apply_filter.py b/algoliasearch/recommend/models/re_ranking_apply_filter.py index 39053345c..5718f7ba2 100644 --- a/algoliasearch/recommend/models/re_ranking_apply_filter.py +++ b/algoliasearch/recommend/models/re_ranking_apply_filter.py @@ -27,7 +27,7 @@ class ReRankingApplyFilter(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[ReRankingApplyFilter], str]] = None + actual_instance: Union[List[ReRankingApplyFilter], str, None] = None one_of_schemas: Set[str] = {"List[ReRankingApplyFilter]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,14 +40,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[List[ReRankingApplyFilter], str]]: + ) -> Union[List[ReRankingApplyFilter], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -104,8 +104,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/recommend_hit.py b/algoliasearch/recommend/models/recommend_hit.py index b0a0fa5c9..493b1fb7e 100644 --- a/algoliasearch/recommend/models/recommend_hit.py +++ b/algoliasearch/recommend/models/recommend_hit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,25 +22,34 @@ from algoliasearch.recommend.models.ranking_info import RankingInfo from algoliasearch.recommend.models.snippet_result import SnippetResult +_ALIASES = { + "object_id": "objectID", + "highlight_result": "_highlightResult", + "snippet_result": "_snippetResult", + "ranking_info": "_rankingInfo", + "distinct_seq_id": "_distinctSeqID", + "score": "_score", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RecommendHit(BaseModel): """ Recommend hit. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique record identifier. 
""" - highlight_result: Optional[Dict[str, HighlightResult]] = Field( - default=None, alias="_highlightResult" - ) + highlight_result: Optional[Dict[str, HighlightResult]] = None """ Surround words that match the query with HTML tags for highlighting. """ - snippet_result: Optional[Dict[str, SnippetResult]] = Field( - default=None, alias="_snippetResult" - ) + snippet_result: Optional[Dict[str, SnippetResult]] = None """ Snippets that show the context around a matching search query. """ - ranking_info: Optional[RankingInfo] = Field(default=None, alias="_rankingInfo") - distinct_seq_id: Optional[int] = Field(default=None, alias="_distinctSeqID") - score: float = Field(alias="_score") + ranking_info: Optional[RankingInfo] = None + distinct_seq_id: Optional[int] = None + score: float """ Recommendation score. """ model_config = ConfigDict( @@ -48,6 +57,7 @@ class RecommendHit(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/recommend/models/recommend_rule.py b/algoliasearch/recommend/models/recommend_rule.py index 3c05c9bf1..bf162fdf9 100644 --- a/algoliasearch/recommend/models/recommend_rule.py +++ b/algoliasearch/recommend/models/recommend_rule.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -23,22 +23,36 @@ from algoliasearch.recommend.models.rule_metadata import RuleMetadata from algoliasearch.recommend.models.time_range import TimeRange +_ALIASES = { + "metadata": "_metadata", + "object_id": "objectID", + "condition": "condition", + "consequence": "consequence", + "description": "description", + "enabled": "enabled", + "validity": "validity", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class 
RecommendRule(BaseModel): """ Recommend rule. """ - metadata: Optional[RuleMetadata] = Field(default=None, alias="_metadata") - object_id: Optional[str] = Field(default=None, alias="objectID") + metadata: Optional[RuleMetadata] = None + object_id: Optional[str] = None """ Unique identifier of a rule object. """ - condition: Optional[Condition] = Field(default=None, alias="condition") - consequence: Optional[Consequence] = Field(default=None, alias="consequence") - description: Optional[str] = Field(default=None, alias="description") + condition: Optional[Condition] = None + consequence: Optional[Consequence] = None + description: Optional[str] = None """ Description of the rule's purpose. This can be helpful for display in the Algolia dashboard. """ - enabled: Optional[bool] = Field(default=None, alias="enabled") + enabled: Optional[bool] = None """ Indicates whether to enable the rule. If it isn't enabled, it isn't applied at query time. """ - validity: Optional[List[TimeRange]] = Field(default=None, alias="validity") + validity: Optional[List[TimeRange]] = None """ Time periods when the rule is active. 
""" model_config = ConfigDict( @@ -46,6 +60,7 @@ class RecommendRule(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/recommend_search_params.py b/algoliasearch/recommend/models/recommend_search_params.py index 44f5c5924..fe7cc81e8 100644 --- a/algoliasearch/recommend/models/recommend_search_params.py +++ b/algoliasearch/recommend/models/recommend_search_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -43,271 +43,266 @@ from algoliasearch.recommend.models.tag_filters import TagFilters from algoliasearch.recommend.models.typo_tolerance import TypoTolerance +_ALIASES = { + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", 
+ "enable_ab_test": "enableABTest", + "query": "query", + "attributes_for_faceting": "attributesForFaceting", + "replicas": "replicas", + "pagination_limited_to": "paginationLimitedTo", + "unretrievable_attributes": "unretrievableAttributes", + "disable_typo_tolerance_on_words": "disableTypoToleranceOnWords", + "attributes_to_transliterate": "attributesToTransliterate", + "camel_case_attributes": "camelCaseAttributes", + "decompounded_attributes": "decompoundedAttributes", + "index_languages": "indexLanguages", + "disable_prefix_on_attributes": "disablePrefixOnAttributes", + "allow_compression_of_integer_array": "allowCompressionOfIntegerArray", + "numeric_attributes_for_filtering": "numericAttributesForFiltering", + "separators_to_index": "separatorsToIndex", + "searchable_attributes": "searchableAttributes", + "user_data": "userData", + "custom_normalization": "customNormalization", + "attribute_for_distinct": "attributeForDistinct", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": 
"removeWordsIfNoResults", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RecommendSearchParams(BaseModel): """ Search parameters for filtering the recommendations. """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. 
- **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. 
For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). """ - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. 
""" - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. """ - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. 
""" - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. [Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. 
""" - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. """ - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - attributes_for_faceting: Optional[List[str]] = Field( - default=None, alias="attributesForFaceting" - ) + attributes_for_faceting: Optional[List[str]] = None """ Attributes used for [faceting](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/). Facets are attributes that let you categorize search results. They can be used for filtering search results. By default, no attribute is used for faceting. Attribute names are case-sensitive. 
**Modifiers** - `filterOnly(\"ATTRIBUTE\")`. Allows using this attribute as a filter, but doesn't evalue the facet values. - `searchable(\"ATTRIBUTE\")`. Allows searching for facet values. - `afterDistinct(\"ATTRIBUTE\")`. Evaluates the facet count _after_ deduplication with `distinct`. This ensures accurate facet counts. You can apply this modifier to searchable facets: `afterDistinct(searchable(ATTRIBUTE))`. """ - replicas: Optional[List[str]] = Field(default=None, alias="replicas") + replicas: Optional[List[str]] = None """ Creates [replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/). Replicas are copies of a primary index with the same records but different settings, synonyms, or rules. If you want to offer a different ranking or sorting of your search results, you'll use replica indices. All index operations on a primary index are automatically forwarded to its replicas. To add a replica index, you must provide the complete set of replicas to this parameter. If you omit a replica from this list, the replica turns into a regular, standalone index that will no longer by synced with the primary index. **Modifier** - `virtual(\"REPLICA\")`. Create a virtual replica, Virtual replicas don't increase the number of records and are optimized for [Relevant sorting](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/relevant-sort/). """ - pagination_limited_to: Optional[int] = Field( - default=None, alias="paginationLimitedTo" - ) + pagination_limited_to: Optional[int] = None """ Maximum number of search results that can be obtained through pagination. Higher pagination limits might slow down your search. For pagination limits above 1,000, the sorting of results beyond the 1,000th hit can't be guaranteed. 
""" - unretrievable_attributes: Optional[List[str]] = Field( - default=None, alias="unretrievableAttributes" - ) + unretrievable_attributes: Optional[List[str]] = None """ Attributes that can't be retrieved at query time. This can be useful if you want to use an attribute for ranking or to [restrict access](https://www.algolia.com/doc/guides/security/api-keys/how-to/user-restricted-access-to-data/), but don't want to include it in the search results. Attribute names are case-sensitive. """ - disable_typo_tolerance_on_words: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnWords" - ) + disable_typo_tolerance_on_words: Optional[List[str]] = None """ Words for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). This also turns off [word splitting and concatenation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/splitting-and-concatenation/) for the specified words. """ - attributes_to_transliterate: Optional[List[str]] = Field( - default=None, alias="attributesToTransliterate" - ) + attributes_to_transliterate: Optional[List[str]] = None """ Attributes, for which you want to support [Japanese transliteration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#japanese-transliteration-and-type-ahead). Transliteration supports searching in any of the Japanese writing systems. To support transliteration, you must set the indexing language to Japanese. Attribute names are case-sensitive. """ - camel_case_attributes: Optional[List[str]] = Field( - default=None, alias="camelCaseAttributes" - ) + camel_case_attributes: Optional[List[str]] = None """ Attributes for which to split [camel case](https://wikipedia.org/wiki/Camel_case) words. Attribute names are case-sensitive. 
""" - decompounded_attributes: Optional[object] = Field( - default=None, alias="decompoundedAttributes" - ) + decompounded_attributes: Optional[object] = None """ Searchable attributes to which Algolia should apply [word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/how-to/customize-segmentation/) (decompounding). Attribute names are case-sensitive. Compound words are formed by combining two or more individual words, and are particularly prevalent in Germanic languages—for example, \"firefighter\". With decompounding, the individual components are indexed separately. You can specify different lists for different languages. Decompounding is supported for these languages: Dutch (`nl`), German (`de`), Finnish (`fi`), Danish (`da`), Swedish (`sv`), and Norwegian (`no`). Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - index_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="indexLanguages" - ) + index_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific processing steps, such as word detection and dictionary settings. **You should always specify an indexing language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). 
""" - disable_prefix_on_attributes: Optional[List[str]] = Field( - default=None, alias="disablePrefixOnAttributes" - ) + disable_prefix_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to turn off [prefix matching](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/#adjusting-prefix-search). Attribute names are case-sensitive. """ - allow_compression_of_integer_array: Optional[bool] = Field( - default=None, alias="allowCompressionOfIntegerArray" - ) + allow_compression_of_integer_array: Optional[bool] = None """ Whether arrays with exclusively non-negative integers should be compressed for better performance. If true, the compressed arrays may be reordered. """ - numeric_attributes_for_filtering: Optional[List[str]] = Field( - default=None, alias="numericAttributesForFiltering" - ) + numeric_attributes_for_filtering: Optional[List[str]] = None """ Numeric attributes that can be used as [numerical filters](https://www.algolia.com/doc/guides/managing-results/rules/detecting-intent/how-to/applying-a-custom-filter-for-a-specific-query/#numerical-filters). Attribute names are case-sensitive. By default, all numeric attributes are available as numerical filters. For faster indexing, reduce the number of numeric attributes. If you want to turn off filtering for all numeric attributes, specifiy an attribute that doesn't exist in your index, such as `NO_NUMERIC_FILTERING`. **Modifier** - `equalOnly(\"ATTRIBUTE\")`. Support only filtering based on equality comparisons `=` and `!=`. """ - separators_to_index: Optional[str] = Field(default=None, alias="separatorsToIndex") + separators_to_index: Optional[str] = None """ Controls which separators are indexed. Separators are all non-letter characters except spaces and currency characters, such as $€£¥. By default, separator characters aren't indexed. With `separatorsToIndex`, Algolia treats separator characters as separate words. 
For example, a search for `C#` would report two matches. """ - searchable_attributes: Optional[List[str]] = Field( - default=None, alias="searchableAttributes" - ) + searchable_attributes: Optional[List[str]] = None """ Attributes used for searching. Attribute names are case-sensitive. By default, all attributes are searchable and the [Attribute](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#attribute) ranking criterion is turned off. With a non-empty list, Algolia only returns results with matches in the selected attributes. In addition, the Attribute ranking criterion is turned on: matches in attributes that are higher in the list of `searchableAttributes` rank first. To make matches in two attributes rank equally, include them in a comma-separated string, such as `\"title,alternate_title\"`. Attributes with the same priority are always unordered. For more information, see [Searchable attributes](https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/how-to/setting-searchable-attributes/). **Modifier** - `unordered(\"ATTRIBUTE\")`. Ignore the position of a match within the attribute. Without modifier, matches at the beginning of an attribute rank higer than matches at the end. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - custom_normalization: Optional[Dict[str, Dict[str, str]]] = Field( - default=None, alias="customNormalization" - ) + custom_normalization: Optional[Dict[str, Dict[str, str]]] = None """ Characters and their normalized replacements. This overrides Algolia's default [normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/). 
""" - attribute_for_distinct: Optional[str] = Field( - default=None, alias="attributeForDistinct" - ) + attribute_for_distinct: Optional[str] = None """ Attribute that should be used to establish groups of results. Attribute names are case-sensitive. All records with the same value for this attribute are considered a group. You can combine `attributeForDistinct` with the `distinct` search parameter to control how many items per group are included in the search results. If you want to use the same attribute also for faceting, use the `afterDistinct` modifier of the `attributesForFaceting` setting. This applies faceting _after_ deduplication, which will result in accurate facet counts. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. 
**Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. 
If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. 
For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. """ - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. 
For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. - If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. 
""" - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. 
For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurrences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. 
""" - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). """ - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/recommend_updated_at_response.py b/algoliasearch/recommend/models/recommend_updated_at_response.py index 9e82fdba6..2950a3527 100644 --- a/algoliasearch/recommend/models/recommend_updated_at_response.py +++ b/algoliasearch/recommend/models/recommend_updated_at_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RecommendUpdatedAtResponse(BaseModel): """ Response, taskID, and update timestamp. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class RecommendUpdatedAtResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/recommendations_hit.py b/algoliasearch/recommend/models/recommendations_hit.py index cc1e4bb01..42a421779 100644 --- a/algoliasearch/recommend/models/recommendations_hit.py +++ b/algoliasearch/recommend/models/recommendations_hit.py @@ -31,7 +31,7 @@ class RecommendationsHit(BaseModel): oneof_schema_2_validator: Optional[TrendingFacetHit] = Field(default=None) - actual_instance: Optional[Union[RecommendHit, TrendingFacetHit]] = None + actual_instance: Union[RecommendHit, TrendingFacetHit, None] = None one_of_schemas: Set[str] = {"RecommendHit", "TrendingFacetHit"} def __init__(self, *args, **kwargs) -> None: @@ -44,12 +44,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[RecommendHit, TrendingFacetHit]]: + def unwrap_actual_instance( + self, + ) -> Union[RecommendHit, TrendingFacetHit, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +92,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -104,8 +106,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/recommendations_request.py b/algoliasearch/recommend/models/recommendations_request.py index 2f4599c7d..ad5f9f534 100644 --- a/algoliasearch/recommend/models/recommendations_request.py +++ b/algoliasearch/recommend/models/recommendations_request.py @@ -45,15 +45,14 @@ class RecommendationsRequest(BaseModel): oneof_schema_6_validator: Optional[RecommendedForYouQuery] = Field(default=None) - actual_instance: Optional[ - Union[ - BoughtTogetherQuery, - LookingSimilarQuery, - RecommendedForYouQuery, - RelatedQuery, - TrendingFacetsQuery, - TrendingItemsQuery, - ] + actual_instance: Union[ + BoughtTogetherQuery, + LookingSimilarQuery, + RecommendedForYouQuery, + RelatedQuery, + TrendingFacetsQuery, + TrendingItemsQuery, + None, ] = None one_of_schemas: Set[str] = { "BoughtTogetherQuery", @@ -74,22 +73,22 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[ - BoughtTogetherQuery, - LookingSimilarQuery, - RecommendedForYouQuery, - RelatedQuery, - TrendingFacetsQuery, - TrendingItemsQuery, - ] + ) -> Union[ + BoughtTogetherQuery, + LookingSimilarQuery, + RecommendedForYouQuery, + RelatedQuery, + TrendingFacetsQuery, + TrendingItemsQuery, + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. @@ -155,9 +154,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -179,8 +178,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/recommendations_results.py b/algoliasearch/recommend/models/recommendations_results.py index e6bc6ad1a..ac8b5357e 100644 --- a/algoliasearch/recommend/models/recommendations_results.py +++ b/algoliasearch/recommend/models/recommendations_results.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -25,76 +25,105 @@ from algoliasearch.recommend.models.redirect import Redirect from 
algoliasearch.recommend.models.rendering_content import RenderingContent +_ALIASES = { + "ab_test_id": "abTestID", + "ab_test_variant_id": "abTestVariantID", + "around_lat_lng": "aroundLatLng", + "automatic_radius": "automaticRadius", + "exhaustive": "exhaustive", + "exhaustive_facets_count": "exhaustiveFacetsCount", + "exhaustive_nb_hits": "exhaustiveNbHits", + "exhaustive_typo": "exhaustiveTypo", + "facets": "facets", + "facets_stats": "facets_stats", + "index": "index", + "index_used": "indexUsed", + "message": "message", + "nb_sorted_hits": "nbSortedHits", + "parsed_query": "parsedQuery", + "processing_time_ms": "processingTimeMS", + "processing_timings_ms": "processingTimingsMS", + "query_after_removal": "queryAfterRemoval", + "redirect": "redirect", + "rendering_content": "renderingContent", + "server_time_ms": "serverTimeMS", + "server_used": "serverUsed", + "user_data": "userData", + "query_id": "queryID", + "automatic_insights": "_automaticInsights", + "page": "page", + "nb_hits": "nbHits", + "nb_pages": "nbPages", + "hits_per_page": "hitsPerPage", + "hits": "hits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RecommendationsResults(BaseModel): """ RecommendationsResults """ - ab_test_id: Optional[int] = Field(default=None, alias="abTestID") + ab_test_id: Optional[int] = None """ A/B test ID. This is only included in the response for indices that are part of an A/B test. """ - ab_test_variant_id: Optional[int] = Field(default=None, alias="abTestVariantID") + ab_test_variant_id: Optional[int] = None """ Variant ID. This is only included in the response for indices that are part of an A/B test. """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Computed geographical location. 
""" - automatic_radius: Optional[str] = Field(default=None, alias="automaticRadius") + automatic_radius: Optional[str] = None """ Distance from a central coordinate provided by `aroundLatLng`. """ - exhaustive: Optional[Exhaustive] = Field(default=None, alias="exhaustive") - exhaustive_facets_count: Optional[bool] = Field( - default=None, alias="exhaustiveFacetsCount" - ) + exhaustive: Optional[Exhaustive] = None + exhaustive_facets_count: Optional[bool] = None """ See the `facetsCount` field of the `exhaustive` object in the response. """ - exhaustive_nb_hits: Optional[bool] = Field(default=None, alias="exhaustiveNbHits") + exhaustive_nb_hits: Optional[bool] = None """ See the `nbHits` field of the `exhaustive` object in the response. """ - exhaustive_typo: Optional[bool] = Field(default=None, alias="exhaustiveTypo") + exhaustive_typo: Optional[bool] = None """ See the `typo` field of the `exhaustive` object in the response. """ - facets: Optional[Dict[str, Dict[str, int]]] = Field(default=None, alias="facets") + facets: Optional[Dict[str, Dict[str, int]]] = None """ Facet counts. """ - facets_stats: Optional[Dict[str, FacetStats]] = Field( - default=None, alias="facets_stats" - ) + facets_stats: Optional[Dict[str, FacetStats]] = None """ Statistics for numerical facets. """ - index: Optional[str] = Field(default=None, alias="index") + index: Optional[str] = None """ Index name used for the query. """ - index_used: Optional[str] = Field(default=None, alias="indexUsed") + index_used: Optional[str] = None """ Index name used for the query. During A/B testing, the targeted index isn't always the index used by the query. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ Warnings about the query. """ - nb_sorted_hits: Optional[int] = Field(default=None, alias="nbSortedHits") + nb_sorted_hits: Optional[int] = None """ Number of hits selected and sorted by the relevant sort algorithm. 
""" - parsed_query: Optional[str] = Field(default=None, alias="parsedQuery") + parsed_query: Optional[str] = None """ Post-[normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/#what-does-normalization-mean) query string that will be searched. """ - processing_time_ms: int = Field(alias="processingTimeMS") + processing_time_ms: int """ Time the server took to process the request, in milliseconds. """ - processing_timings_ms: Optional[object] = Field( - default=None, alias="processingTimingsMS" - ) + processing_timings_ms: Optional[object] = None """ Experimental. List of processing steps and their times, in milliseconds. You can use this list to investigate performance issues. """ - query_after_removal: Optional[str] = Field(default=None, alias="queryAfterRemoval") + query_after_removal: Optional[str] = None """ Markup text indicating which parts of the original query have been removed to retrieve a non-empty result set. """ - redirect: Optional[Redirect] = Field(default=None, alias="redirect") - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - server_time_ms: Optional[int] = Field(default=None, alias="serverTimeMS") + redirect: Optional[Redirect] = None + rendering_content: Optional[RenderingContent] = None + server_time_ms: Optional[int] = None """ Time the server took to process the request, in milliseconds. """ - server_used: Optional[str] = Field(default=None, alias="serverUsed") + server_used: Optional[str] = None """ Host name of the server that processed the request. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - query_id: Optional[str] = Field(default=None, alias="queryID") + query_id: Optional[str] = None """ Unique identifier for the query. 
This is used for [click analytics](https://www.algolia.com/doc/guides/analytics/click-analytics/). """ - automatic_insights: Optional[bool] = Field(default=None, alias="_automaticInsights") + automatic_insights: Optional[bool] = None """ Whether automatic events collection is enabled for the application. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - nb_hits: Optional[int] = Field(default=None, alias="nbHits") + nb_hits: Optional[int] = None """ Number of results (hits). """ - nb_pages: Optional[int] = Field(default=None, alias="nbPages") + nb_pages: Optional[int] = None """ Number of pages of results. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - hits: List[RecommendationsHit] = Field(alias="hits") + hits: List[RecommendationsHit] @field_validator("around_lat_lng") def around_lat_lng_validate_regular_expression(cls, value): @@ -113,6 +142,7 @@ def around_lat_lng_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/recommended_for_you_query.py b/algoliasearch/recommend/models/recommended_for_you_query.py index a7a94d0cd..5ff7470c3 100644 --- a/algoliasearch/recommend/models/recommended_for_you_query.py +++ b/algoliasearch/recommend/models/recommended_for_you_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -24,31 +24,41 @@ RecommendedForYouModel, ) +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", + "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "model": 
"model", + "fallback_parameters": "fallbackParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RecommendedForYouQuery(BaseModel): """ RecommendedForYouQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. """ - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. """ - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - model: RecommendedForYouModel = Field(alias="model") - fallback_parameters: Optional[FallbackParams] = Field( - default=None, alias="fallbackParameters" - ) + query_parameters: Optional[RecommendSearchParams] = None + model: RecommendedForYouModel + fallback_parameters: Optional[FallbackParams] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/redirect.py b/algoliasearch/recommend/models/redirect.py index be0d4aa11..cf2f4d617 100644 --- a/algoliasearch/recommend/models/redirect.py +++ b/algoliasearch/recommend/models/redirect.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,21 +22,28 @@ RedirectRuleIndexMetadata, ) +_ALIASES = { + 
"index": "index", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Redirect(BaseModel): """ [Redirect results to a URL](https://www.algolia.com/doc/guides/managing-results/rules/merchandising-and-promoting/how-to/redirects/), this this parameter is for internal use only. """ - index: Optional[List[RedirectRuleIndexMetadata]] = Field( - default=None, alias="index" - ) + index: Optional[List[RedirectRuleIndexMetadata]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/redirect_rule_index_data.py b/algoliasearch/recommend/models/redirect_rule_index_data.py index bb6826e62..27ae7d300 100644 --- a/algoliasearch/recommend/models/redirect_rule_index_data.py +++ b/algoliasearch/recommend/models/redirect_rule_index_data.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "rule_object_id": "ruleObjectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RedirectRuleIndexData(BaseModel): """ Redirect rule data. 
""" - rule_object_id: str = Field(alias="ruleObjectID") + rule_object_id: str model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/redirect_rule_index_metadata.py b/algoliasearch/recommend/models/redirect_rule_index_metadata.py index 9b56e60ba..0fa85a610 100644 --- a/algoliasearch/recommend/models/redirect_rule_index_metadata.py +++ b/algoliasearch/recommend/models/redirect_rule_index_metadata.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,27 +22,40 @@ RedirectRuleIndexData, ) +_ALIASES = { + "source": "source", + "dest": "dest", + "reason": "reason", + "succeed": "succeed", + "data": "data", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RedirectRuleIndexMetadata(BaseModel): """ RedirectRuleIndexMetadata """ - source: str = Field(alias="source") + source: str """ Source index for the redirect rule. """ - dest: str = Field(alias="dest") + dest: str """ Destination index for the redirect rule. """ - reason: str = Field(alias="reason") + reason: str """ Reason for the redirect rule. """ - succeed: bool = Field(alias="succeed") + succeed: bool """ Redirect rule status. 
""" - data: RedirectRuleIndexData = Field(alias="data") + data: RedirectRuleIndexData model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/redirect_url.py b/algoliasearch/recommend/models/redirect_url.py index fce44987e..f3d9ab8bd 100644 --- a/algoliasearch/recommend/models/redirect_url.py +++ b/algoliasearch/recommend/models/redirect_url.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RedirectURL(BaseModel): """ The redirect rule container. """ - url: Optional[str] = Field(default=None, alias="url") + url: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/related_query.py b/algoliasearch/recommend/models/related_query.py index 9e3da6133..982580047 100644 --- a/algoliasearch/recommend/models/related_query.py +++ b/algoliasearch/recommend/models/related_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,33 +22,44 @@ from algoliasearch.recommend.models.recommend_search_params import RecommendSearchParams from algoliasearch.recommend.models.related_model import RelatedModel +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", 
+ "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "model": "model", + "object_id": "objectID", + "fallback_parameters": "fallbackParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RelatedQuery(BaseModel): """ RelatedQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. """ - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. """ - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - model: RelatedModel = Field(alias="model") - object_id: str = Field(alias="objectID") + query_parameters: Optional[RecommendSearchParams] = None + model: RelatedModel + object_id: str """ Unique record identifier. 
""" - fallback_parameters: Optional[FallbackParams] = Field( - default=None, alias="fallbackParameters" - ) + fallback_parameters: Optional[FallbackParams] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/remove_stop_words.py b/algoliasearch/recommend/models/remove_stop_words.py index c01d1b6e9..9a4599340 100644 --- a/algoliasearch/recommend/models/remove_stop_words.py +++ b/algoliasearch/recommend/models/remove_stop_words.py @@ -30,7 +30,7 @@ class RemoveStopWords(BaseModel): """ ISO code for languages for which stop words should be removed. This overrides languages you set in `queryLanguges`. """ oneof_schema_2_validator: Optional[bool] = Field(default=None) """ If true, stop words are removed for all languages you included in `queryLanguages`, or for all supported languages, if `queryLanguages` is empty. If false, stop words are not removed. """ - actual_instance: Optional[Union[List[SupportedLanguage], bool]] = None + actual_instance: Union[List[SupportedLanguage], bool, None] = None one_of_schemas: Set[str] = {"List[SupportedLanguage]", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[SupportedLanguage], bool]]: + def unwrap_actual_instance( + self, + ) -> Union[List[SupportedLanguage], bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -91,9 +93,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -103,8 +105,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[SupportedLanguage], boo return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/rendering_content.py b/algoliasearch/recommend/models/rendering_content.py index ad7f91977..fd287226f 100644 --- a/algoliasearch/recommend/models/rendering_content.py +++ b/algoliasearch/recommend/models/rendering_content.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,6 +20,17 @@ from algoliasearch.recommend.models.facet_ordering import FacetOrdering from algoliasearch.recommend.models.redirect_url import RedirectURL +from algoliasearch.recommend.models.widgets import Widgets + +_ALIASES = { + "facet_ordering": "facetOrdering", + "redirect": "redirect", + "widgets": "widgets", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) class RenderingContent(BaseModel): @@ -27,14 +38,16 @@ class RenderingContent(BaseModel): Extra data that can be used in the search UI. You can use this to control aspects of your search UI, such as, the order of facet names and values without changing your frontend code. 
""" - facet_ordering: Optional[FacetOrdering] = Field(default=None, alias="facetOrdering") - redirect: Optional[RedirectURL] = Field(default=None, alias="redirect") + facet_ordering: Optional[FacetOrdering] = None + redirect: Optional[RedirectURL] = None + widgets: Optional[Widgets] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: @@ -72,5 +85,10 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: if obj.get("redirect") is not None else None ) + obj["widgets"] = ( + Widgets.from_dict(obj["widgets"]) + if obj.get("widgets") is not None + else None + ) return cls.model_validate(obj) diff --git a/algoliasearch/recommend/models/rule_metadata.py b/algoliasearch/recommend/models/rule_metadata.py index 7b453c399..588841612 100644 --- a/algoliasearch/recommend/models/rule_metadata.py +++ b/algoliasearch/recommend/models/rule_metadata.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "last_update": "lastUpdate", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RuleMetadata(BaseModel): """ Rule metadata. """ - last_update: Optional[str] = Field(default=None, alias="lastUpdate") + last_update: Optional[str] = None """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class RuleMetadata(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/search_recommend_rules_params.py b/algoliasearch/recommend/models/search_recommend_rules_params.py index 9d2ba22aa..01580299c 100644 --- a/algoliasearch/recommend/models/search_recommend_rules_params.py +++ b/algoliasearch/recommend/models/search_recommend_rules_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,26 +18,42 @@ from typing_extensions import Self +_ALIASES = { + "query": "query", + "context": "context", + "page": "page", + "hits_per_page": "hitsPerPage", + "enabled": "enabled", + "filters": "filters", + "facets": "facets", + "max_values_per_facet": "maxValuesPerFacet", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SearchRecommendRulesParams(BaseModel): """ Recommend rules parameters. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - context: Optional[str] = Field(default=None, alias="context") + context: Optional[str] = None """ Only search for rules with matching context. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Requested page of the API response. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Maximum number of hits per page. """ - enabled: Optional[bool] = Field(default=None, alias="enabled") + enabled: Optional[bool] = None """ Whether to only show rules where the value of their `enabled` property matches this parameter. 
If absent, show all rules, regardless of their `enabled` property. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression. This only searches for rules matching the filter expression. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Include facets and facet values in the response. Use `['*']` to include all facets. """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of values to return for each facet. """ model_config = ConfigDict( @@ -45,6 +61,7 @@ class SearchRecommendRulesParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/search_recommend_rules_response.py b/algoliasearch/recommend/models/search_recommend_rules_response.py index 8d1ff5384..0f6f0b18e 100644 --- a/algoliasearch/recommend/models/search_recommend_rules_response.py +++ b/algoliasearch/recommend/models/search_recommend_rules_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.recommend.models.recommend_rule import RecommendRule +_ALIASES = { + "hits": "hits", + "nb_hits": "nbHits", + "page": "page", + "nb_pages": "nbPages", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchRecommendRulesResponse(BaseModel): """ SearchRecommendRulesResponse """ - hits: List[RecommendRule] = Field(alias="hits") + hits: List[RecommendRule] """ Recommend rules that match the search criteria. 
""" - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). """ - page: int = Field(alias="page") + page: int """ Page of search results to retrieve. """ - nb_pages: int = Field(alias="nbPages") + nb_pages: int """ Number of pages of results. """ model_config = ConfigDict( @@ -40,6 +51,7 @@ class SearchRecommendRulesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/snippet_result.py b/algoliasearch/recommend/models/snippet_result.py index 8ddadfe51..bf5052cf4 100644 --- a/algoliasearch/recommend/models/snippet_result.py +++ b/algoliasearch/recommend/models/snippet_result.py @@ -32,8 +32,8 @@ class SnippetResult(BaseModel): """ Snippets that show the context around a matching search query. """ oneof_schema_3_validator: Optional[List[SnippetResult]] = Field(default=None) """ Snippets that show the context around a matching search query. """ - actual_instance: Optional[ - Union[Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption] + actual_instance: Union[ + Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption, None ] = None one_of_schemas: Set[str] = { "Dict[str, SnippetResult]", @@ -51,15 +51,15 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption] + ) -> Union[ + Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption, Self, None ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -109,9 +109,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -130,8 +130,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/snippet_result_option.py b/algoliasearch/recommend/models/snippet_result_option.py index a8e79c4d1..80422636d 100644 --- a/algoliasearch/recommend/models/snippet_result_option.py +++ b/algoliasearch/recommend/models/snippet_result_option.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.recommend.models.match_level import MatchLevel +_ALIASES = { + "value": "value", + "match_level": "matchLevel", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SnippetResultOption(BaseModel): """ Snippets that show the context around a matching search query. """ - value: str = Field(alias="value") + value: str """ Highlighted attribute value, including HTML tags. 
""" - match_level: MatchLevel = Field(alias="matchLevel") + match_level: MatchLevel model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/tag_filters.py b/algoliasearch/recommend/models/tag_filters.py index eb88ff91e..7aed9674a 100644 --- a/algoliasearch/recommend/models/tag_filters.py +++ b/algoliasearch/recommend/models/tag_filters.py @@ -27,7 +27,7 @@ class TagFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[TagFilters], str]] = None + actual_instance: Union[List[TagFilters], str, None] = None one_of_schemas: Set[str] = {"List[TagFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[TagFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[TagFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[TagFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/time_range.py b/algoliasearch/recommend/models/time_range.py index d0bfe155b..b472ed698 100644 --- a/algoliasearch/recommend/models/time_range.py +++ b/algoliasearch/recommend/models/time_range.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_from": "from", + "until": "until", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TimeRange(BaseModel): """ TimeRange """ - var_from: int = Field(alias="from") + var_from: int """ When the rule should start to be active, in Unix epoch time. """ - until: int = Field(alias="until") + until: int """ When the rule should stop to be active, in Unix epoch time. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TimeRange(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/trending_facet_hit.py b/algoliasearch/recommend/models/trending_facet_hit.py index 35bd3a914..20c1d0a6a 100644 --- a/algoliasearch/recommend/models/trending_facet_hit.py +++ b/algoliasearch/recommend/models/trending_facet_hit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "score": "_score", + "facet_name": "facetName", + "facet_value": "facetValue", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TrendingFacetHit(BaseModel): """ Trending facet hit. """ - score: float = Field(alias="_score") + score: float """ Recommendation score. """ - facet_name: str = Field(alias="facetName") + facet_name: str """ Facet attribute. To be used in combination with `facetValue`. If specified, only recommendations matching the facet filter will be returned. """ - facet_value: str = Field(alias="facetValue") + facet_value: str """ Facet value. To be used in combination with `facetName`. If specified, only recommendations matching the facet filter will be returned. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class TrendingFacetHit(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/trending_facets_query.py b/algoliasearch/recommend/models/trending_facets_query.py index ebfe768b4..fa8532e68 100644 --- a/algoliasearch/recommend/models/trending_facets_query.py +++ b/algoliasearch/recommend/models/trending_facets_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,33 +22,44 @@ from algoliasearch.recommend.models.recommend_search_params import RecommendSearchParams from algoliasearch.recommend.models.trending_facets_model import TrendingFacetsModel +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", + "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "facet_name": "facetName", + "model": "model", + "fallback_parameters": "fallbackParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TrendingFacetsQuery(BaseModel): """ TrendingFacetsQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. """ - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. 
""" - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - facet_name: object = Field(alias="facetName") + query_parameters: Optional[RecommendSearchParams] = None + facet_name: object """ Facet attribute for which to retrieve trending facet values. """ - model: TrendingFacetsModel = Field(alias="model") - fallback_parameters: Optional[FallbackParams] = Field( - default=None, alias="fallbackParameters" - ) + model: TrendingFacetsModel + fallback_parameters: Optional[FallbackParams] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/trending_items_query.py b/algoliasearch/recommend/models/trending_items_query.py index a653b2298..c25aa318e 100644 --- a/algoliasearch/recommend/models/trending_items_query.py +++ b/algoliasearch/recommend/models/trending_items_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,35 +22,47 @@ from algoliasearch.recommend.models.recommend_search_params import RecommendSearchParams from algoliasearch.recommend.models.trending_items_model import TrendingItemsModel +_ALIASES = { + "index_name": "indexName", + "threshold": "threshold", + "max_recommendations": "maxRecommendations", + "query_parameters": "queryParameters", + "facet_name": "facetName", + "facet_value": "facetValue", + "model": "model", + "fallback_parameters": "fallbackParameters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class TrendingItemsQuery(BaseModel): """ TrendingItemsQuery """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). 
""" - threshold: float = Field(alias="threshold") + threshold: float """ Minimum score a recommendation must have to be included in the response. """ - max_recommendations: Optional[int] = Field(default=None, alias="maxRecommendations") + max_recommendations: Optional[int] = None """ Maximum number of recommendations to retrieve. By default, all recommendations are returned and no fallback request is made. Depending on the available recommendations and the other request parameters, the actual number of recommendations may be lower than this value. """ - query_parameters: Optional[RecommendSearchParams] = Field( - default=None, alias="queryParameters" - ) - facet_name: Optional[str] = Field(default=None, alias="facetName") + query_parameters: Optional[RecommendSearchParams] = None + facet_name: Optional[str] = None """ Facet attribute. To be used in combination with `facetValue`. If specified, only recommendations matching the facet filter will be returned. """ - facet_value: Optional[str] = Field(default=None, alias="facetValue") + facet_value: Optional[str] = None """ Facet value. To be used in combination with `facetName`. If specified, only recommendations matching the facet filter will be returned. """ - model: TrendingItemsModel = Field(alias="model") - fallback_parameters: Optional[FallbackParams] = Field( - default=None, alias="fallbackParameters" - ) + model: TrendingItemsModel + fallback_parameters: Optional[FallbackParams] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/typo_tolerance.py b/algoliasearch/recommend/models/typo_tolerance.py index bae2ec6d4..45c1e10b4 100644 --- a/algoliasearch/recommend/models/typo_tolerance.py +++ b/algoliasearch/recommend/models/typo_tolerance.py @@ -30,7 +30,7 @@ class TypoTolerance(BaseModel): """ Whether typo tolerance is active. 
If true, matches with typos are included in the search results and rank after exact matches. """ oneof_schema_2_validator: Optional[TypoToleranceEnum] = Field(default=None) - actual_instance: Optional[Union[TypoToleranceEnum, bool]] = None + actual_instance: Union[TypoToleranceEnum, bool, None] = None one_of_schemas: Set[str] = {"TypoToleranceEnum", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[TypoToleranceEnum, bool]]: + def unwrap_actual_instance(self) -> Union[TypoToleranceEnum, bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], TypoToleranceEnum, bool]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/recommend/models/value.py b/algoliasearch/recommend/models/value.py index a78eb08a1..78b88a53c 100644 --- a/algoliasearch/recommend/models/value.py +++ b/algoliasearch/recommend/models/value.py @@ -10,7 +10,7 @@ from sys import version_info from 
typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,26 @@ from algoliasearch.recommend.models.sort_remaining_by import SortRemainingBy +_ALIASES = { + "order": "order", + "sort_remaining_by": "sortRemainingBy", + "hide": "hide", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Value(BaseModel): """ Value """ - order: Optional[List[str]] = Field(default=None, alias="order") + order: Optional[List[str]] = None """ Explicit order of facets or facet values. This setting lets you always show specific facets or facet values at the top of the list. """ - sort_remaining_by: Optional[SortRemainingBy] = Field( - default=None, alias="sortRemainingBy" - ) - hide: Optional[List[str]] = Field(default=None, alias="hide") + sort_remaining_by: Optional[SortRemainingBy] = None + hide: Optional[List[str]] = None """ Hide facet values. """ model_config = ConfigDict( @@ -39,6 +47,7 @@ class Value(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/recommend/models/widgets.py b/algoliasearch/recommend/models/widgets.py new file mode 100644 index 000000000..df7fc687d --- /dev/null +++ b/algoliasearch/recommend/models/widgets.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. 
+""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.recommend.models.banners import Banners + +_ALIASES = { + "banners": "banners", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Widgets(BaseModel): + """ + widgets returned from any rules that are applied to the current search. + """ + + banners: Optional[Banners] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Widgets from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Widgets from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["banners"] = ( + Banners.from_dict(obj["banners"]) + if obj.get("banners") is not None + else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/client.py b/algoliasearch/search/client.py index 7eaa6112e..dac4a4aa5 100644 --- a/algoliasearch/search/client.py +++ b/algoliasearch/search/client.py @@ -14,7 +14,7 @@ from re import search from sys import version_info from time import time -from typing import Any, Callable, Dict, List, Optional, Tuple, Union 
+from typing import Any, Callable, Dict, List, Optional, Union from urllib.parse import quote from pydantic import Field, StrictBool, StrictInt, StrictStr @@ -26,6 +26,7 @@ from typing_extensions import Self from algoliasearch.http.api_response import ApiResponse +from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.exceptions import RequestException, ValidUntilNotFoundException from algoliasearch.http.helpers import ( RetryTimeout, @@ -33,7 +34,7 @@ create_iterable_sync, ) from algoliasearch.http.request_options import RequestOptions -from algoliasearch.http.serializer import QueryParametersSerializer, bodySerializer +from algoliasearch.http.serializer import QueryParametersSerializer, body_serializer from algoliasearch.http.transporter import Transporter from algoliasearch.http.transporter_sync import TransporterSync from algoliasearch.http.verb import Verb @@ -85,6 +86,7 @@ from algoliasearch.search.models.log_type import LogType from algoliasearch.search.models.multiple_batch_response import MultipleBatchResponse from algoliasearch.search.models.operation_index_params import OperationIndexParams +from algoliasearch.search.models.operation_type import OperationType from algoliasearch.search.models.remove_user_id_response import RemoveUserIdResponse from algoliasearch.search.models.replace_all_objects_response import ( ReplaceAllObjectsResponse, @@ -150,7 +152,7 @@ class SearchClient: """ _transporter: Transporter - _config: SearchConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -161,7 +163,7 @@ def __init__( config: Optional[SearchConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = SearchConfig(transporter.config.app_id, transporter.config.api_key) if config is None: config = SearchConfig(app_id, api_key) @@ -212,7 +214,7 @@ async def close(self) -> None: async def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to 
authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) async def wait_for_task( self, @@ -225,21 +227,22 @@ async def wait_for_task( """ Helper: Wait for a task to be published (completed) for a given `indexName` and `taskID`. """ - self._retry_count = 0 + _retry_count = 0 - async def _func(_: GetTaskResponse) -> GetTaskResponse: + async def _func(_: Optional[GetTaskResponse]) -> GetTaskResponse: return await self.get_task(index_name, task_id, request_options) def _aggregator(_: GetTaskResponse) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 return await create_iterable( func=_func, aggregator=_aggregator, validate=lambda _resp: _resp.status == "published", - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda: f"The maximum number of retries exceeded. (${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${max_retries})", ) async def wait_for_app_task( @@ -252,28 +255,29 @@ async def wait_for_app_task( """ Helper: Wait for an application-level task to complete for a given `taskID`. """ - self._retry_count = 0 + _retry_count = 0 - async def _func(_: GetTaskResponse) -> GetTaskResponse: + async def _func(_: Optional[GetTaskResponse]) -> GetTaskResponse: return await self.get_app_task(task_id, request_options) def _aggregator(_: GetTaskResponse) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 return await create_iterable( func=_func, aggregator=_aggregator, validate=lambda _resp: _resp.status == "published", - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda: f"The maximum number of retries exceeded. 
(${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${max_retries})", ) async def wait_for_api_key( self, key: str, operation: str, - api_key: Optional[ApiKey] = None, + api_key: Optional[Union[ApiKey, dict[str, Any]]] = None, max_retries: int = 50, timeout: RetryTimeout = RetryTimeout(), request_options: Optional[Union[dict, RequestOptions]] = None, @@ -281,32 +285,37 @@ async def wait_for_api_key( """ Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. """ - self._retry_count = 0 + _retry_count = 0 if operation == "update" and api_key is None: raise ValueError( "`apiKey` is required when waiting for an `update` operation." ) - async def _func(_prev: GetApiKeyResponse | None) -> GetApiKeyResponse | None: + async def _func(_prev: Optional[GetApiKeyResponse]) -> GetApiKeyResponse: try: return await self.get_api_key(key=key, request_options=request_options) except RequestException as e: if e.status_code == 404 and ( operation == "delete" or operation == "add" ): - return None + return None # pyright: ignore raise e def _aggregator(_: GetApiKeyResponse | None) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 def _validate(_resp: GetApiKeyResponse | None) -> bool: if operation == "update": + if _resp is None: + return False resp_dict = _resp.to_dict() api_key_dict = ( api_key.to_dict() if isinstance(api_key, ApiKey) else api_key ) + if api_key_dict is None: + return False for field in api_key_dict: if isinstance(api_key_dict[field], list) and isinstance( resp_dict[field], list @@ -327,28 +336,28 @@ def _validate(_resp: GetApiKeyResponse | None) -> bool: func=_func, validate=_validate, aggregator=_aggregator, - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda 
_: f"The maximum number of retries exceeded. (${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${max_retries})", ) async def browse_objects( self, index_name: str, aggregator: Optional[Callable[[BrowseResponse], None]], - browse_params: Optional[BrowseParamsObject] = BrowseParamsObject(), + browse_params: BrowseParamsObject = BrowseParamsObject(), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BrowseResponse: """ Helper: Iterate on the `browse` method of the client to allow aggregating objects of an index. """ - async def _func(_prev: BrowseResponse) -> BrowseResponse: + async def _func(_prev: Optional[BrowseResponse]) -> BrowseResponse: if _prev is not None and _prev.cursor is not None: browse_params.cursor = _prev.cursor return await self.browse( index_name=index_name, - browse_params=browse_params, + browse_params=BrowseParams(browse_params), request_options=request_options, ) @@ -362,18 +371,17 @@ async def browse_rules( self, index_name: str, aggregator: Optional[Callable[[SearchRulesResponse], None]], - search_rules_params: Optional[SearchRulesParams] = SearchRulesParams( - hits_per_page=1000 - ), + search_rules_params: SearchRulesParams = SearchRulesParams(hits_per_page=1000), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRulesResponse: """ Helper: Iterate on the `search_rules` method of the client to allow aggregating rules of an index. 
""" - if search_rules_params is not None: + if search_rules_params.hits_per_page is None: search_rules_params.hits_per_page = 1000 + hits_per_page = search_rules_params.hits_per_page - async def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: + async def _func(_prev: Optional[SearchRulesResponse]) -> SearchRulesResponse: if _prev is not None: search_rules_params.page = _prev.page + 1 return await self.search_rules( @@ -384,7 +392,7 @@ async def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: return await create_iterable( func=_func, - validate=lambda _resp: _resp.nb_hits < search_rules_params.hits_per_page, + validate=lambda _resp: _resp.nb_hits < hits_per_page, aggregator=aggregator, ) @@ -392,28 +400,34 @@ async def browse_synonyms( self, index_name: str, aggregator: Callable[[SearchSynonymsResponse], None], - search_synonyms_params: Optional[SearchSynonymsParams] = SearchSynonymsParams(), + search_synonyms_params: SearchSynonymsParams = SearchSynonymsParams( + hits_per_page=1000 + ), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchSynonymsResponse: """ Helper: Iterate on the `search_synonyms` method of the client to allow aggregating synonyms of an index. 
""" - if search_synonyms_params.page is None: - search_synonyms_params.page = 0 - search_synonyms_params.hits_per_page = 1000 + hits_per_page = 1000 + page = search_synonyms_params.page or 0 + search_synonyms_params.hits_per_page = hits_per_page - async def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: + async def _func( + _prev: Optional[SearchSynonymsResponse], + ) -> SearchSynonymsResponse: + nonlocal page resp = await self.search_synonyms( index_name=index_name, search_synonyms_params=search_synonyms_params, request_options=request_options, ) - search_synonyms_params.page += 1 + page += 1 + search_synonyms_params.page = page return resp return await create_iterable( func=_func, - validate=lambda _resp: _resp.nb_hits < search_synonyms_params.hits_per_page, + validate=lambda _resp: _resp.nb_hits < hits_per_page, aggregator=aggregator, ) @@ -427,16 +441,21 @@ async def generate_secured_api_key( """ Helper: Generates a secured API key based on the given `parent_api_key` and given `restrictions`. 
""" - if not isinstance(restrictions, SecuredApiKeyRestrictions): - restrictions = SecuredApiKeyRestrictions.from_dict(restrictions) + restrictions_dict = {} + if isinstance(restrictions, SecuredApiKeyRestrictions): + restrictions_dict = restrictions.to_dict() + elif isinstance(restrictions, dict): + restrictions_dict = restrictions - restrictions = restrictions.to_dict() - if "searchParams" in restrictions: - restrictions = {**restrictions, **restrictions["searchParams"]} - del restrictions["searchParams"] + if "searchParams" in restrictions_dict: + restrictions_dict = { + **restrictions_dict, + **restrictions_dict["searchParams"], + } + del restrictions_dict["searchParams"] query_parameters = QueryParametersSerializer( - dict(sorted(restrictions.items())) + dict(sorted(restrictions_dict.items())) ).encoded() secured_key = hmac.new( @@ -504,7 +523,7 @@ async def partial_update_objects( self, index_name: str, objects: List[Dict[str, Any]], - create_if_not_exists: Optional[bool] = False, + create_if_not_exists: bool = False, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ @@ -557,7 +576,7 @@ async def replace_all_objects( objects: List[Dict[str, Any]], batch_size: int = 1000, request_options: Optional[Union[dict, RequestOptions]] = None, - ) -> List[ApiResponse[str]]: + ) -> ReplaceAllObjectsResponse: """ Helper: Replaces all objects (records) in the given `index_name` with the given `objects`. A temporary index is created during this process in order to backup your data. 
@@ -569,7 +588,7 @@ async def _copy() -> UpdatedAtResponse: return await self.operation_index( index_name=index_name, operation_index_params=OperationIndexParams( - operation="copy", + operation=OperationType.COPY, destination=tmp_index_name, scope=[ ScopeType("settings"), @@ -602,7 +621,7 @@ async def _copy() -> UpdatedAtResponse: move_operation_response = await self.operation_index( index_name=tmp_index_name, operation_index_params=OperationIndexParams( - operation="move", + operation=OperationType.MOVE, destination=index_name, ), request_options=request_options, @@ -632,7 +651,7 @@ async def index_exists(self, index_name: str) -> bool: async def add_api_key_with_http_info( self, - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -660,7 +679,7 @@ async def add_api_key_with_http_info( verb=Verb.POST, path="/1/keys", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -668,7 +687,7 @@ async def add_api_key_with_http_info( async def add_api_key( self, - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> AddApiKeyResponse: """ @@ -741,7 +760,7 @@ async def add_or_update_object_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -784,7 +803,9 @@ async def add_or_update_object( async def append_source_with_http_info( self, - source: Annotated[Source, Field(description="Source to add.")], + source: Union[ + Annotated[Source, Field(description="Source to add.")], dict[str, Any] + ], request_options: 
Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -812,7 +833,7 @@ async def append_source_with_http_info( verb=Verb.POST, path="/1/security/sources/append", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -820,7 +841,9 @@ async def append_source_with_http_info( async def append_source( self, - source: Annotated[Source, Field(description="Source to add.")], + source: Union[ + Annotated[Source, Field(description="Source to add.")], dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -846,7 +869,7 @@ async def assign_user_id_with_http_info( description="Unique identifier of the user who makes the search request.", ), ], - assign_user_id_params: AssignUserIdParams, + assign_user_id_params: Union[AssignUserIdParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -873,7 +896,7 @@ async def assign_user_id_with_http_info( "Parameter `assign_user_id_params` is required when calling `assign_user_id`." 
) - _headers: Dict[str, Optional[str]] = {} + _headers: Dict[str, str] = {} if x_algolia_user_id is not None: _headers["x-algolia-user-id"] = x_algolia_user_id @@ -887,7 +910,7 @@ async def assign_user_id_with_http_info( path="/1/clusters/mapping", request_options=self._request_options.merge( headers=_headers, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -902,7 +925,7 @@ async def assign_user_id( description="Unique identifier of the user who makes the search request.", ), ], - assign_user_id_params: AssignUserIdParams, + assign_user_id_params: Union[AssignUserIdParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -929,7 +952,7 @@ async def batch_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - batch_write_params: BatchWriteParams, + batch_write_params: Union[BatchWriteParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -962,7 +985,7 @@ async def batch_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -974,7 +997,7 @@ async def batch( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - batch_write_params: BatchWriteParams, + batch_write_params: Union[BatchWriteParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BatchResponse: """ @@ -1002,7 +1025,7 @@ async def batch_assign_user_ids_with_http_info( description="Unique identifier of the user who makes the search request.", ), ], - batch_assign_user_ids_params: BatchAssignUserIdsParams, + batch_assign_user_ids_params: 
Union[BatchAssignUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1029,7 +1052,7 @@ async def batch_assign_user_ids_with_http_info( "Parameter `batch_assign_user_ids_params` is required when calling `batch_assign_user_ids`." ) - _headers: Dict[str, Optional[str]] = {} + _headers: Dict[str, str] = {} if x_algolia_user_id is not None: _headers["x-algolia-user-id"] = x_algolia_user_id @@ -1043,7 +1066,7 @@ async def batch_assign_user_ids_with_http_info( path="/1/clusters/mapping/batch", request_options=self._request_options.merge( headers=_headers, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1058,7 +1081,7 @@ async def batch_assign_user_ids( description="Unique identifier of the user who makes the search request.", ), ], - batch_assign_user_ids_params: BatchAssignUserIdsParams, + batch_assign_user_ids_params: Union[BatchAssignUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -1081,10 +1104,15 @@ async def batch_assign_user_ids( async def batch_dictionary_entries_with_http_info( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + batch_dictionary_entries_params: Union[ + BatchDictionaryEntriesParams, dict[str, Any] ], - batch_dictionary_entries_params: BatchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1121,7 +1149,7 @@ async def batch_dictionary_entries_with_http_info( "{dictionaryName}", quote(str(dictionary_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), 
user_request_options=request_options, ), use_read_transporter=False, @@ -1129,10 +1157,15 @@ async def batch_dictionary_entries_with_http_info( async def batch_dictionary_entries( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + batch_dictionary_entries_params: Union[ + BatchDictionaryEntriesParams, dict[str, Any] ], - batch_dictionary_entries_params: BatchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -1159,7 +1192,7 @@ async def browse_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - browse_params: Optional[BrowseParams] = None, + browse_params: Union[Optional[BrowseParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1191,7 +1224,7 @@ async def browse_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1203,7 +1236,7 @@ async def browse( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - browse_params: Optional[BrowseParams] = None, + browse_params: Union[Optional[BrowseParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BrowseResponse: """ @@ -1313,10 +1346,10 @@ async def clear_rules_with_http_info( "Parameter `index_name` is required when calling `clear_rules`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return await self._transporter.request( verb=Verb.POST, @@ -1391,10 +1424,10 @@ async def clear_synonyms_with_http_info( "Parameter `index_name` is required when calling `clear_synonyms`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return await self._transporter.request( verb=Verb.POST, @@ -1469,11 +1502,11 @@ async def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.DELETE, @@ -1544,11 +1577,11 @@ async def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return await self._transporter.request( verb=Verb.GET, @@ -1623,11 +1656,11 @@ async def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - 
_query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1638,7 +1671,7 @@ async def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1715,11 +1748,11 @@ async def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -1730,7 +1763,7 @@ async def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1828,7 +1861,7 @@ async def delete_by_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - delete_by_params: DeleteByParams, + delete_by_params: Union[DeleteByParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -1865,7 +1898,7 @@ async def delete_by_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -1877,7 +1910,7 @@ async def delete_by( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - delete_by_params: DeleteByParams, + 
delete_by_params: Union[DeleteByParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> DeletedAtResponse: """ @@ -2068,10 +2101,10 @@ async def delete_rule_with_http_info( "Parameter `object_id` is required when calling `delete_rule`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return await self._transporter.request( verb=Verb.DELETE, @@ -2217,10 +2250,10 @@ async def delete_synonym_with_http_info( "Parameter `object_id` is required when calling `delete_synonym`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return await self._transporter.request( verb=Verb.DELETE, @@ -2455,11 +2488,14 @@ async def get_logs_with_http_info( description="Index for which to retrieve log entries. By default, log entries are retrieved for all indices. " ), ] = None, - type: Annotated[ - Optional[LogType], - Field( - description="Type of log entries to retrieve. By default, all log entries are retrieved. " - ), + type: Union[ + Annotated[ + Optional[LogType], + Field( + description="Type of log entries to retrieve. By default, all log entries are retrieved. " + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2481,16 +2517,16 @@ async def get_logs_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if length is not None: - _query_parameters.append(("length", length)) + _query_parameters["length"] = length if index_name is not None: - _query_parameters.append(("indexName", index_name)) + _query_parameters["indexName"] = index_name if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type return await self._transporter.request( verb=Verb.GET, @@ -2520,11 +2556,14 @@ async def get_logs( description="Index for which to retrieve log entries. By default, log entries are retrieved for all indices. " ), ] = None, - type: Annotated[ - Optional[LogType], - Field( - description="Type of log entries to retrieve. By default, all log entries are retrieved. " - ), + type: Union[ + Annotated[ + Optional[LogType], + Field( + description="Type of log entries to retrieve. By default, all log entries are retrieved. " + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetLogsResponse: @@ -2591,10 +2630,10 @@ async def get_object_with_http_info( "Parameter `object_id` is required when calling `get_object`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if attributes_to_retrieve is not None: - _query_parameters.append(("attributesToRetrieve", attributes_to_retrieve)) + _query_parameters["attributesToRetrieve"] = attributes_to_retrieve return await self._transporter.request( verb=Verb.GET, @@ -2645,8 +2684,9 @@ async def get_object( async def get_objects_with_http_info( self, - get_objects_params: Annotated[ - GetObjectsParams, Field(description="Request object.") + get_objects_params: Union[ + Annotated[GetObjectsParams, Field(description="Request object.")], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -2675,7 +2715,7 @@ async def get_objects_with_http_info( verb=Verb.POST, path="/1/indexes/*/objects", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -2683,8 +2723,9 @@ async def get_objects_with_http_info( async def get_objects( self, - get_objects_params: Annotated[ - GetObjectsParams, Field(description="Request object.") + get_objects_params: Union[ + Annotated[GetObjectsParams, Field(description="Request object.")], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetObjectsResponse: @@ -3139,10 +3180,10 @@ async def has_pending_mappings_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if get_clusters is not None: - _query_parameters.append(("getClusters", get_clusters)) + _query_parameters["getClusters"] = get_clusters return await self._transporter.request( verb=Verb.GET, @@ -3281,12 +3322,12 @@ async def list_indices_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if hits_per_page is not None: - _query_parameters.append(("hitsPerPage", hits_per_page)) + _query_parameters["hitsPerPage"] = hits_per_page return await self._transporter.request( verb=Verb.GET, @@ -3356,12 +3397,12 @@ async def list_user_ids_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if hits_per_page is not None: - _query_parameters.append(("hitsPerPage", hits_per_page)) + _query_parameters["hitsPerPage"] = hits_per_page return await self._transporter.request( verb=Verb.GET, @@ -3406,7 +3447,7 @@ async def list_user_ids( async def multiple_batch_with_http_info( self, - batch_params: BatchParams, + batch_params: Union[BatchParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3432,7 +3473,7 @@ async def multiple_batch_with_http_info( verb=Verb.POST, path="/1/indexes/*/batch", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3440,7 +3481,7 @@ async def multiple_batch_with_http_info( async def multiple_batch( self, - batch_params: BatchParams, + batch_params: Union[BatchParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> MultipleBatchResponse: """ @@ -3461,7 +3502,7 @@ async def operation_index_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - operation_index_params: OperationIndexParams, + operation_index_params: Union[OperationIndexParams, dict[str, 
Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3498,7 +3539,7 @@ async def operation_index_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3510,7 +3551,7 @@ async def operation_index( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - operation_index_params: OperationIndexParams, + operation_index_params: Union[OperationIndexParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -3580,10 +3621,10 @@ async def partial_update_object_with_http_info( "Parameter `attributes_to_update` is required when calling `partial_update_object`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if create_if_not_exists is not None: - _query_parameters.append(("createIfNotExists", create_if_not_exists)) + _query_parameters["createIfNotExists"] = create_if_not_exists _data = {} if attributes_to_update is not None: @@ -3596,7 +3637,7 @@ async def partial_update_object_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3710,7 +3751,10 @@ async def remove_user_id( async def replace_sources_with_http_info( self, - source: Annotated[List[Source], Field(description="Allowed sources.")], + source: Union[ + Annotated[List[Source], Field(description="Allowed sources.")], + list[dict[str, Any]], + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -3738,7 +3782,7 @@ async def 
replace_sources_with_http_info( verb=Verb.PUT, path="/1/security/sources", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3746,7 +3790,10 @@ async def replace_sources_with_http_info( async def replace_sources( self, - source: Annotated[List[Source], Field(description="Allowed sources.")], + source: Union[ + Annotated[List[Source], Field(description="Allowed sources.")], + list[dict[str, Any]], + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ReplaceSourceResponse: """ @@ -3859,7 +3906,7 @@ async def save_object_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -3904,7 +3951,7 @@ async def save_rule_with_http_info( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a rule object.") ], - rule: Rule, + rule: Union[Rule, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -3942,10 +3989,10 @@ async def save_rule_with_http_info( if rule is None: raise ValueError("Parameter `rule` is required when calling `save_rule`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if rule is not None: @@ -3958,7 +4005,7 @@ async def save_rule_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), 
user_request_options=request_options, ), use_read_transporter=False, @@ -3973,7 +4020,7 @@ async def save_rule( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a rule object.") ], - rule: Rule, + rule: Union[Rule, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4008,7 +4055,7 @@ async def save_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - rules: List[Rule], + rules: Union[List[Rule], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4047,12 +4094,12 @@ async def save_rules_with_http_info( if rules is None: raise ValueError("Parameter `rules` is required when calling `save_rules`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas if clear_existing_rules is not None: - _query_parameters.append(("clearExistingRules", clear_existing_rules)) + _query_parameters["clearExistingRules"] = clear_existing_rules _data = {} if rules is not None: @@ -4065,7 +4112,7 @@ async def save_rules_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4077,7 +4124,7 @@ async def save_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - rules: List[Rule], + rules: Union[List[Rule], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4125,7 +4172,7 @@ async def 
save_synonym_with_http_info( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a synonym object.") ], - synonym_hit: SynonymHit, + synonym_hit: Union[SynonymHit, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4165,10 +4212,10 @@ async def save_synonym_with_http_info( "Parameter `synonym_hit` is required when calling `save_synonym`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if synonym_hit is not None: @@ -4181,7 +4228,7 @@ async def save_synonym_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4196,7 +4243,7 @@ async def save_synonym( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a synonym object.") ], - synonym_hit: SynonymHit, + synonym_hit: Union[SynonymHit, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4231,7 +4278,7 @@ async def save_synonyms_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - synonym_hit: List[SynonymHit], + synonym_hit: Union[List[SynonymHit], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4272,14 +4319,12 @@ async def save_synonyms_with_http_info( "Parameter `synonym_hit` is required when calling `save_synonyms`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas if replace_existing_synonyms is not None: - _query_parameters.append( - ("replaceExistingSynonyms", replace_existing_synonyms) - ) + _query_parameters["replaceExistingSynonyms"] = replace_existing_synonyms _data = {} if synonym_hit is not None: @@ -4292,7 +4337,7 @@ async def save_synonyms_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4304,7 +4349,7 @@ async def save_synonyms( self, index_name: Annotated[ StrictStr, Field(description="Name of the index on which to perform the operation."), ], - synonym_hit: List[SynonymHit], + synonym_hit: Union[List[SynonymHit], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -4345,11 +4390,14 @@ async def save_synonyms( async def search_with_http_info( self, - search_method_params: Annotated[ - SearchMethodParams, - Field( - description="Muli-search request body. Results are returned in the same order as the requests." - ), + search_method_params: Union[ + Annotated[ + SearchMethodParams, + Field( + description="Multi-search request body. Results are returned in the same order as the requests." 
+ ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -4378,7 +4426,7 @@ async def search_with_http_info( verb=Verb.POST, path="/1/indexes/*/queries", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4386,11 +4434,14 @@ async def search_with_http_info( async def search( self, - search_method_params: Annotated[ - SearchMethodParams, - Field( - description="Muli-search request body. Results are returned in the same order as the requests." - ), + search_method_params: Union[ + Annotated[ + SearchMethodParams, + Field( + description="Muli-search request body. Results are returned in the same order as the requests." + ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchResponses: @@ -4410,10 +4461,15 @@ async def search( async def search_dictionary_entries_with_http_info( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + search_dictionary_entries_params: Union[ + SearchDictionaryEntriesParams, dict[str, Any] ], - search_dictionary_entries_params: SearchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4450,7 +4506,7 @@ async def search_dictionary_entries_with_http_info( "{dictionaryName}", quote(str(dictionary_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4458,10 +4514,15 @@ async def search_dictionary_entries_with_http_info( async def search_dictionary_entries( self, - dictionary_name: 
Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + search_dictionary_entries_params: Union[ + SearchDictionaryEntriesParams, dict[str, Any] ], - search_dictionary_entries_params: SearchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchDictionaryEntriesResponse: """ @@ -4494,7 +4555,9 @@ async def search_for_facet_values_with_http_info( description="Facet attribute in which to search for values. This attribute must be included in the `attributesForFaceting` index setting with the `searchable()` modifier. " ), ], - search_for_facet_values_request: Optional[SearchForFacetValuesRequest] = None, + search_for_facet_values_request: Union[ + Optional[SearchForFacetValuesRequest], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4533,7 +4596,7 @@ async def search_for_facet_values_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{facetName}", quote(str(facet_name), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4551,7 +4614,9 @@ async def search_for_facet_values( description="Facet attribute in which to search for values. This attribute must be included in the `attributesForFaceting` index setting with the `searchable()` modifier. 
" ), ], - search_for_facet_values_request: Optional[SearchForFacetValuesRequest] = None, + search_for_facet_values_request: Union[ + Optional[SearchForFacetValuesRequest], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchForFacetValuesResponse: """ @@ -4580,7 +4645,7 @@ async def search_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_rules_params: Optional[SearchRulesParams] = None, + search_rules_params: Union[Optional[SearchRulesParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4612,7 +4677,7 @@ async def search_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4624,7 +4689,7 @@ async def search_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_rules_params: Optional[SearchRulesParams] = None, + search_rules_params: Union[Optional[SearchRulesParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRulesResponse: """ @@ -4651,7 +4716,7 @@ async def search_single_index_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_params: Optional[SearchParams] = None, + search_params: Union[Optional[SearchParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4683,7 +4748,7 @@ async def search_single_index_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), 
use_read_transporter=True, @@ -4695,7 +4760,7 @@ async def search_single_index( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_params: Optional[SearchParams] = None, + search_params: Union[Optional[SearchParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchResponse: """ @@ -4722,9 +4787,12 @@ async def search_synonyms_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_synonyms_params: Annotated[ - Optional[SearchSynonymsParams], - Field(description="Body of the `searchSynonyms` operation."), + search_synonyms_params: Union[ + Annotated[ + Optional[SearchSynonymsParams], + Field(description="Body of the `searchSynonyms` operation."), + ], + dict[str, Any], ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -4757,7 +4825,7 @@ async def search_synonyms_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4769,9 +4837,12 @@ async def search_synonyms( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_synonyms_params: Annotated[ - Optional[SearchSynonymsParams], - Field(description="Body of the `searchSynonyms` operation."), + search_synonyms_params: Union[ + Annotated[ + Optional[SearchSynonymsParams], + Field(description="Body of the `searchSynonyms` operation."), + ], + dict[str, Any], ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchSynonymsResponse: @@ -4795,7 +4866,7 @@ async def search_synonyms( async def search_user_ids_with_http_info( self, - search_user_ids_params: SearchUserIdsParams, + search_user_ids_params: Union[SearchUserIdsParams, dict[str, Any]], 
request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4823,7 +4894,7 @@ async def search_user_ids_with_http_info( verb=Verb.POST, path="/1/clusters/mapping/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -4831,7 +4902,7 @@ async def search_user_ids_with_http_info( async def search_user_ids( self, - search_user_ids_params: SearchUserIdsParams, + search_user_ids_params: Union[SearchUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchUserIdsResponse: """ @@ -4852,7 +4923,7 @@ async def search_user_ids( async def set_dictionary_settings_with_http_info( self, - dictionary_settings_params: DictionarySettingsParams, + dictionary_settings_params: Union[DictionarySettingsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -4880,7 +4951,7 @@ async def set_dictionary_settings_with_http_info( verb=Verb.PUT, path="/1/dictionaries/*/settings", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4888,7 +4959,7 @@ async def set_dictionary_settings_with_http_info( async def set_dictionary_settings( self, - dictionary_settings_params: DictionarySettingsParams, + dictionary_settings_params: Union[DictionarySettingsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -4913,7 +4984,7 @@ async def set_settings_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - index_settings: IndexSettings, + index_settings: Union[IndexSettings, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], 
Field(description="Whether changes are applied to replica indices."), @@ -4946,10 +5017,10 @@ async def set_settings_with_http_info( "Parameter `index_settings` is required when calling `set_settings`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if index_settings is not None: @@ -4962,7 +5033,7 @@ async def set_settings_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -4974,7 +5045,7 @@ async def set_settings( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - index_settings: IndexSettings, + index_settings: Union[IndexSettings, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -5004,7 +5075,7 @@ async def set_settings( async def update_api_key_with_http_info( self, key: Annotated[StrictStr, Field(description="API key.")], - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -5039,7 +5110,7 @@ async def update_api_key_with_http_info( verb=Verb.PUT, path="/1/keys/{key}".replace("{key}", quote(str(key), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5048,7 +5119,7 @@ async def update_api_key_with_http_info( async def update_api_key( self, key: Annotated[StrictStr, Field(description="API key.")], - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], 
request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdateApiKeyResponse: """ @@ -5087,7 +5158,7 @@ class SearchClientSync: """ _transporter: TransporterSync - _config: SearchConfig + _config: BaseConfig _request_options: RequestOptions def __init__( @@ -5098,7 +5169,7 @@ def __init__( config: Optional[SearchConfig] = None, ) -> None: if transporter is not None and config is None: - config = transporter._config + config = SearchConfig(transporter.config.app_id, transporter.config.api_key) if config is None: config = SearchConfig(app_id, api_key) @@ -5148,7 +5219,7 @@ def close(self) -> None: def set_client_api_key(self, api_key: str) -> None: """Sets a new API key to authenticate requests.""" - self._transporter._config.set_client_api_key(api_key) + self._transporter.config.set_client_api_key(api_key) def wait_for_task( self, @@ -5161,21 +5232,22 @@ def wait_for_task( """ Helper: Wait for a task to be published (completed) for a given `indexName` and `taskID`. """ - self._retry_count = 0 + _retry_count = 0 - def _func(_: GetTaskResponse) -> GetTaskResponse: + def _func(_: Optional[GetTaskResponse]) -> GetTaskResponse: return self.get_task(index_name, task_id, request_options) def _aggregator(_: GetTaskResponse) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 return create_iterable_sync( func=_func, aggregator=_aggregator, validate=lambda _resp: _resp.status == "published", - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda: f"The maximum number of retries exceeded. (${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. 
(${_retry_count}/${max_retries})", ) def wait_for_app_task( @@ -5188,28 +5260,29 @@ def wait_for_app_task( """ Helper: Wait for an application-level task to complete for a given `taskID`. """ - self._retry_count = 0 + _retry_count = 0 - def _func(_: GetTaskResponse) -> GetTaskResponse: + def _func(_: Optional[GetTaskResponse]) -> GetTaskResponse: return self.get_app_task(task_id, request_options) def _aggregator(_: GetTaskResponse) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 return create_iterable_sync( func=_func, aggregator=_aggregator, validate=lambda _resp: _resp.status == "published", - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda: f"The maximum number of retries exceeded. (${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${max_retries})", ) def wait_for_api_key( self, key: str, operation: str, - api_key: Optional[ApiKey] = None, + api_key: Optional[Union[ApiKey, dict[str, Any]]] = None, max_retries: int = 50, timeout: RetryTimeout = RetryTimeout(), request_options: Optional[Union[dict, RequestOptions]] = None, @@ -5217,32 +5290,37 @@ def wait_for_api_key( """ Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. """ - self._retry_count = 0 + _retry_count = 0 if operation == "update" and api_key is None: raise ValueError( "`apiKey` is required when waiting for an `update` operation." 
) - def _func(_prev: GetApiKeyResponse | None) -> GetApiKeyResponse | None: + def _func(_prev: Optional[GetApiKeyResponse]) -> GetApiKeyResponse: try: return self.get_api_key(key=key, request_options=request_options) except RequestException as e: if e.status_code == 404 and ( operation == "delete" or operation == "add" ): - return None + return None # pyright: ignore raise e def _aggregator(_: GetApiKeyResponse | None) -> None: - self._retry_count += 1 + nonlocal _retry_count + _retry_count += 1 def _validate(_resp: GetApiKeyResponse | None) -> bool: if operation == "update": + if _resp is None: + return False resp_dict = _resp.to_dict() api_key_dict = ( api_key.to_dict() if isinstance(api_key, ApiKey) else api_key ) + if api_key_dict is None: + return False for field in api_key_dict: if isinstance(api_key_dict[field], list) and isinstance( resp_dict[field], list @@ -5263,28 +5341,28 @@ def _validate(_resp: GetApiKeyResponse | None) -> bool: func=_func, validate=_validate, aggregator=_aggregator, - timeout=lambda: timeout(self._retry_count), - error_validate=lambda _: self._retry_count >= max_retries, - error_message=lambda _: f"The maximum number of retries exceeded. (${self._retry_count}/${max_retries})", + timeout=lambda: timeout(_retry_count), + error_validate=lambda _: _retry_count >= max_retries, + error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${max_retries})", ) def browse_objects( self, index_name: str, aggregator: Optional[Callable[[BrowseResponse], None]], - browse_params: Optional[BrowseParamsObject] = BrowseParamsObject(), + browse_params: BrowseParamsObject = BrowseParamsObject(), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BrowseResponse: """ Helper: Iterate on the `browse` method of the client to allow aggregating objects of an index. 
""" - def _func(_prev: BrowseResponse) -> BrowseResponse: + def _func(_prev: Optional[BrowseResponse]) -> BrowseResponse: if _prev is not None and _prev.cursor is not None: browse_params.cursor = _prev.cursor return self.browse( index_name=index_name, - browse_params=browse_params, + browse_params=BrowseParams(browse_params), request_options=request_options, ) @@ -5298,18 +5376,17 @@ def browse_rules( self, index_name: str, aggregator: Optional[Callable[[SearchRulesResponse], None]], - search_rules_params: Optional[SearchRulesParams] = SearchRulesParams( - hits_per_page=1000 - ), + search_rules_params: SearchRulesParams = SearchRulesParams(hits_per_page=1000), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRulesResponse: """ Helper: Iterate on the `search_rules` method of the client to allow aggregating rules of an index. """ - if search_rules_params is not None: + if search_rules_params.hits_per_page is None: search_rules_params.hits_per_page = 1000 + hits_per_page = search_rules_params.hits_per_page - def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: + def _func(_prev: Optional[SearchRulesResponse]) -> SearchRulesResponse: if _prev is not None: search_rules_params.page = _prev.page + 1 return self.search_rules( @@ -5320,7 +5397,7 @@ def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: return create_iterable_sync( func=_func, - validate=lambda _resp: _resp.nb_hits < search_rules_params.hits_per_page, + validate=lambda _resp: _resp.nb_hits < hits_per_page, aggregator=aggregator, ) @@ -5328,28 +5405,32 @@ def browse_synonyms( self, index_name: str, aggregator: Callable[[SearchSynonymsResponse], None], - search_synonyms_params: Optional[SearchSynonymsParams] = SearchSynonymsParams(), + search_synonyms_params: SearchSynonymsParams = SearchSynonymsParams( + hits_per_page=1000 + ), request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchSynonymsResponse: """ Helper: Iterate on the `search_synonyms` method 
of the client to allow aggregating synonyms of an index. """ - if search_synonyms_params.page is None: - search_synonyms_params.page = 0 - search_synonyms_params.hits_per_page = 1000 + hits_per_page = 1000 + page = search_synonyms_params.page or 0 + search_synonyms_params.hits_per_page = hits_per_page - def _func(_prev: SearchRulesResponse) -> SearchRulesResponse: + def _func(_prev: Optional[SearchSynonymsResponse]) -> SearchSynonymsResponse: + nonlocal page resp = self.search_synonyms( index_name=index_name, search_synonyms_params=search_synonyms_params, request_options=request_options, ) - search_synonyms_params.page += 1 + page += 1 + search_synonyms_params.page = page return resp return create_iterable_sync( func=_func, - validate=lambda _resp: _resp.nb_hits < search_synonyms_params.hits_per_page, + validate=lambda _resp: _resp.nb_hits < hits_per_page, aggregator=aggregator, ) @@ -5363,16 +5444,21 @@ def generate_secured_api_key( """ Helper: Generates a secured API key based on the given `parent_api_key` and given `restrictions`. 
""" - if not isinstance(restrictions, SecuredApiKeyRestrictions): - restrictions = SecuredApiKeyRestrictions.from_dict(restrictions) + restrictions_dict = {} + if isinstance(restrictions, SecuredApiKeyRestrictions): + restrictions_dict = restrictions.to_dict() + elif isinstance(restrictions, dict): + restrictions_dict = restrictions - restrictions = restrictions.to_dict() - if "searchParams" in restrictions: - restrictions = {**restrictions, **restrictions["searchParams"]} - del restrictions["searchParams"] + if "searchParams" in restrictions_dict: + restrictions_dict = { + **restrictions_dict, + **restrictions_dict["searchParams"], + } + del restrictions_dict["searchParams"] query_parameters = QueryParametersSerializer( - dict(sorted(restrictions.items())) + dict(sorted(restrictions_dict.items())) ).encoded() secured_key = hmac.new( @@ -5440,7 +5526,7 @@ def partial_update_objects( self, index_name: str, objects: List[Dict[str, Any]], - create_if_not_exists: Optional[bool] = False, + create_if_not_exists: bool = False, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ @@ -5491,7 +5577,7 @@ def replace_all_objects( objects: List[Dict[str, Any]], batch_size: int = 1000, request_options: Optional[Union[dict, RequestOptions]] = None, - ) -> List[ApiResponse[str]]: + ) -> ReplaceAllObjectsResponse: """ Helper: Replaces all objects (records) in the given `index_name` with the given `objects`. A temporary index is created during this process in order to backup your data. 
@@ -5503,7 +5589,7 @@ def _copy() -> UpdatedAtResponse: return self.operation_index( index_name=index_name, operation_index_params=OperationIndexParams( - operation="copy", + operation=OperationType.COPY, destination=tmp_index_name, scope=[ ScopeType("settings"), @@ -5536,7 +5622,7 @@ def _copy() -> UpdatedAtResponse: move_operation_response = self.operation_index( index_name=tmp_index_name, operation_index_params=OperationIndexParams( - operation="move", + operation=OperationType.MOVE, destination=index_name, ), request_options=request_options, @@ -5566,7 +5652,7 @@ def index_exists(self, index_name: str) -> bool: def add_api_key_with_http_info( self, - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -5594,7 +5680,7 @@ def add_api_key_with_http_info( verb=Verb.POST, path="/1/keys", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5602,7 +5688,7 @@ def add_api_key_with_http_info( def add_api_key( self, - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> AddApiKeyResponse: """ @@ -5675,7 +5761,7 @@ def add_or_update_object_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5718,7 +5804,9 @@ def add_or_update_object( def append_source_with_http_info( self, - source: Annotated[Source, Field(description="Source to add.")], + source: Union[ + Annotated[Source, Field(description="Source to add.")], dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> 
ApiResponse[str]: """ @@ -5746,7 +5834,7 @@ def append_source_with_http_info( verb=Verb.POST, path="/1/security/sources/append", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5754,7 +5842,9 @@ def append_source_with_http_info( def append_source( self, - source: Annotated[Source, Field(description="Source to add.")], + source: Union[ + Annotated[Source, Field(description="Source to add.")], dict[str, Any] + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -5780,7 +5870,7 @@ def assign_user_id_with_http_info( description="Unique identifier of the user who makes the search request.", ), ], - assign_user_id_params: AssignUserIdParams, + assign_user_id_params: Union[AssignUserIdParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -5807,7 +5897,7 @@ def assign_user_id_with_http_info( "Parameter `assign_user_id_params` is required when calling `assign_user_id`." 
) - _headers: Dict[str, Optional[str]] = {} + _headers: Dict[str, str] = {} if x_algolia_user_id is not None: _headers["x-algolia-user-id"] = x_algolia_user_id @@ -5821,7 +5911,7 @@ def assign_user_id_with_http_info( path="/1/clusters/mapping", request_options=self._request_options.merge( headers=_headers, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5836,7 +5926,7 @@ def assign_user_id( description="Unique identifier of the user who makes the search request.", ), ], - assign_user_id_params: AssignUserIdParams, + assign_user_id_params: Union[AssignUserIdParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -5863,7 +5953,7 @@ def batch_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - batch_write_params: BatchWriteParams, + batch_write_params: Union[BatchWriteParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -5896,7 +5986,7 @@ def batch_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5908,7 +5998,7 @@ def batch( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - batch_write_params: BatchWriteParams, + batch_write_params: Union[BatchWriteParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BatchResponse: """ @@ -5936,7 +6026,7 @@ def batch_assign_user_ids_with_http_info( description="Unique identifier of the user who makes the search request.", ), ], - batch_assign_user_ids_params: BatchAssignUserIdsParams, + batch_assign_user_ids_params: Union[BatchAssignUserIdsParams, dict[str, Any]], 
request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -5963,7 +6053,7 @@ def batch_assign_user_ids_with_http_info( "Parameter `batch_assign_user_ids_params` is required when calling `batch_assign_user_ids`." ) - _headers: Dict[str, Optional[str]] = {} + _headers: Dict[str, str] = {} if x_algolia_user_id is not None: _headers["x-algolia-user-id"] = x_algolia_user_id @@ -5977,7 +6067,7 @@ def batch_assign_user_ids_with_http_info( path="/1/clusters/mapping/batch", request_options=self._request_options.merge( headers=_headers, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -5992,7 +6082,7 @@ def batch_assign_user_ids( description="Unique identifier of the user who makes the search request.", ), ], - batch_assign_user_ids_params: BatchAssignUserIdsParams, + batch_assign_user_ids_params: Union[BatchAssignUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> CreatedAtResponse: """ @@ -6015,10 +6105,15 @@ def batch_assign_user_ids( def batch_dictionary_entries_with_http_info( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + batch_dictionary_entries_params: Union[ + BatchDictionaryEntriesParams, dict[str, Any] ], - batch_dictionary_entries_params: BatchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -6055,7 +6150,7 @@ def batch_dictionary_entries_with_http_info( "{dictionaryName}", quote(str(dictionary_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6063,10 +6158,15 @@ 
def batch_dictionary_entries_with_http_info( def batch_dictionary_entries( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + batch_dictionary_entries_params: Union[ + BatchDictionaryEntriesParams, dict[str, Any] ], - batch_dictionary_entries_params: BatchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -6093,7 +6193,7 @@ def browse_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - browse_params: Optional[BrowseParams] = None, + browse_params: Union[Optional[BrowseParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -6125,7 +6225,7 @@ def browse_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6137,7 +6237,7 @@ def browse( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - browse_params: Optional[BrowseParams] = None, + browse_params: Union[Optional[BrowseParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> BrowseResponse: """ @@ -6245,10 +6345,10 @@ def clear_rules_with_http_info( "Parameter `index_name` is required when calling `clear_rules`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return self._transporter.request( verb=Verb.POST, @@ -6323,10 +6423,10 @@ def clear_synonyms_with_http_info( "Parameter `index_name` is required when calling `clear_synonyms`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return self._transporter.request( verb=Verb.POST, @@ -6401,11 +6501,11 @@ def custom_delete_with_http_info( "Parameter `path` is required when calling `custom_delete`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.DELETE, @@ -6474,11 +6574,11 @@ def custom_get_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_get`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue return self._transporter.request( verb=Verb.GET, @@ -6553,11 +6653,11 @@ def custom_post_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_post`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = 
_qpvalue _data = {} if body is not None: @@ -6568,7 +6668,7 @@ def custom_post_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6643,11 +6743,11 @@ def custom_put_with_http_info( if path is None: raise ValueError("Parameter `path` is required when calling `custom_put`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if parameters is not None: for _qpkey, _qpvalue in parameters.items(): - _query_parameters.append((_qpkey, _qpvalue)) + _query_parameters[_qpkey] = _qpvalue _data = {} if body is not None: @@ -6658,7 +6758,7 @@ def custom_put_with_http_info( path="/{path}".replace("{path}", path), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6754,7 +6854,7 @@ def delete_by_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - delete_by_params: DeleteByParams, + delete_by_params: Union[DeleteByParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -6791,7 +6891,7 @@ def delete_by_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -6803,7 +6903,7 @@ def delete_by( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - delete_by_params: DeleteByParams, + delete_by_params: Union[DeleteByParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = 
None, ) -> DeletedAtResponse: """ @@ -6992,10 +7092,10 @@ def delete_rule_with_http_info( "Parameter `object_id` is required when calling `delete_rule`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return self._transporter.request( verb=Verb.DELETE, @@ -7141,10 +7241,10 @@ def delete_synonym_with_http_info( "Parameter `object_id` is required when calling `delete_synonym`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas return self._transporter.request( verb=Verb.DELETE, @@ -7379,11 +7479,14 @@ def get_logs_with_http_info( description="Index for which to retrieve log entries. By default, log entries are retrieved for all indices. " ), ] = None, - type: Annotated[ - Optional[LogType], - Field( - description="Type of log entries to retrieve. By default, all log entries are retrieved. " - ), + type: Union[ + Annotated[ + Optional[LogType], + Field( + description="Type of log entries to retrieve. By default, all log entries are retrieved. " + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7405,16 +7508,16 @@ def get_logs_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if offset is not None: - _query_parameters.append(("offset", offset)) + _query_parameters["offset"] = offset if length is not None: - _query_parameters.append(("length", length)) + _query_parameters["length"] = length if index_name is not None: - _query_parameters.append(("indexName", index_name)) + _query_parameters["indexName"] = index_name if type is not None: - _query_parameters.append(("type", type)) + _query_parameters["type"] = type return self._transporter.request( verb=Verb.GET, @@ -7444,11 +7547,14 @@ def get_logs( description="Index for which to retrieve log entries. By default, log entries are retrieved for all indices. " ), ] = None, - type: Annotated[ - Optional[LogType], - Field( - description="Type of log entries to retrieve. By default, all log entries are retrieved. " - ), + type: Union[ + Annotated[ + Optional[LogType], + Field( + description="Type of log entries to retrieve. By default, all log entries are retrieved. " + ), + ], + str, ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetLogsResponse: @@ -7515,10 +7621,10 @@ def get_object_with_http_info( "Parameter `object_id` is required when calling `get_object`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if attributes_to_retrieve is not None: - _query_parameters.append(("attributesToRetrieve", attributes_to_retrieve)) + _query_parameters["attributesToRetrieve"] = attributes_to_retrieve return self._transporter.request( verb=Verb.GET, @@ -7569,8 +7675,9 @@ def get_object( def get_objects_with_http_info( self, - get_objects_params: Annotated[ - GetObjectsParams, Field(description="Request object.") + get_objects_params: Union[ + Annotated[GetObjectsParams, Field(description="Request object.")], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -7599,7 +7706,7 @@ def get_objects_with_http_info( verb=Verb.POST, path="/1/indexes/*/objects", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -7607,8 +7714,9 @@ def get_objects_with_http_info( def get_objects( self, - get_objects_params: Annotated[ - GetObjectsParams, Field(description="Request object.") + get_objects_params: Union[ + Annotated[GetObjectsParams, Field(description="Request object.")], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> GetObjectsResponse: @@ -8057,10 +8165,10 @@ def has_pending_mappings_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if get_clusters is not None: - _query_parameters.append(("getClusters", get_clusters)) + _query_parameters["getClusters"] = get_clusters return self._transporter.request( verb=Verb.GET, @@ -8197,12 +8305,12 @@ def list_indices_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. 
""" - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if hits_per_page is not None: - _query_parameters.append(("hitsPerPage", hits_per_page)) + _query_parameters["hitsPerPage"] = hits_per_page return self._transporter.request( verb=Verb.GET, @@ -8270,12 +8378,12 @@ def list_user_ids_with_http_info( :return: Returns the raw algoliasearch 'APIResponse' object. """ - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if page is not None: - _query_parameters.append(("page", page)) + _query_parameters["page"] = page if hits_per_page is not None: - _query_parameters.append(("hitsPerPage", hits_per_page)) + _query_parameters["hitsPerPage"] = hits_per_page return self._transporter.request( verb=Verb.GET, @@ -8318,7 +8426,7 @@ def list_user_ids( def multiple_batch_with_http_info( self, - batch_params: BatchParams, + batch_params: Union[BatchParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8344,7 +8452,7 @@ def multiple_batch_with_http_info( verb=Verb.POST, path="/1/indexes/*/batch", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8352,7 +8460,7 @@ def multiple_batch_with_http_info( def multiple_batch( self, - batch_params: BatchParams, + batch_params: Union[BatchParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> MultipleBatchResponse: """ @@ -8373,7 +8481,7 @@ def operation_index_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - operation_index_params: OperationIndexParams, + operation_index_params: Union[OperationIndexParams, dict[str, Any]], request_options: Optional[Union[dict, 
RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8410,7 +8518,7 @@ def operation_index_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8422,7 +8530,7 @@ def operation_index( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - operation_index_params: OperationIndexParams, + operation_index_params: Union[OperationIndexParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -8492,10 +8600,10 @@ def partial_update_object_with_http_info( "Parameter `attributes_to_update` is required when calling `partial_update_object`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if create_if_not_exists is not None: - _query_parameters.append(("createIfNotExists", create_if_not_exists)) + _query_parameters["createIfNotExists"] = create_if_not_exists _data = {} if attributes_to_update is not None: @@ -8508,7 +8616,7 @@ def partial_update_object_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8622,7 +8730,10 @@ def remove_user_id( def replace_sources_with_http_info( self, - source: Annotated[List[Source], Field(description="Allowed sources.")], + source: Union[ + Annotated[List[Source], Field(description="Allowed sources.")], + list[dict[str, Any]], + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -8650,7 +8761,7 @@ def replace_sources_with_http_info( verb=Verb.PUT, path="/1/security/sources", 
request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8658,7 +8769,10 @@ def replace_sources_with_http_info( def replace_sources( self, - source: Annotated[List[Source], Field(description="Allowed sources.")], + source: Union[ + Annotated[List[Source], Field(description="Allowed sources.")], + list[dict[str, Any]], + ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ReplaceSourceResponse: """ @@ -8771,7 +8885,7 @@ def save_object_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8816,7 +8930,7 @@ def save_rule_with_http_info( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a rule object.") ], - rule: Rule, + rule: Union[Rule, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -8854,10 +8968,10 @@ def save_rule_with_http_info( if rule is None: raise ValueError("Parameter `rule` is required when calling `save_rule`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if rule is not None: @@ -8870,7 +8984,7 @@ def save_rule_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8885,7 +8999,7 @@ def save_rule( object_id: Annotated[ 
StrictStr, Field(description="Unique identifier of a rule object.") ], - rule: Rule, + rule: Union[Rule, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -8920,7 +9034,7 @@ def save_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - rules: List[Rule], + rules: Union[List[Rule], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -8959,12 +9073,12 @@ def save_rules_with_http_info( if rules is None: raise ValueError("Parameter `rules` is required when calling `save_rules`.") - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas if clear_existing_rules is not None: - _query_parameters.append(("clearExistingRules", clear_existing_rules)) + _query_parameters["clearExistingRules"] = clear_existing_rules _data = {} if rules is not None: @@ -8977,7 +9091,7 @@ def save_rules_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -8989,7 +9103,7 @@ def save_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - rules: List[Rule], + rules: Union[List[Rule], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9037,7 +9151,7 @@ def save_synonym_with_http_info( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a synonym object.") ], - synonym_hit: SynonymHit, + synonym_hit: 
Union[SynonymHit, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9077,10 +9191,10 @@ def save_synonym_with_http_info( "Parameter `synonym_hit` is required when calling `save_synonym`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if synonym_hit is not None: @@ -9093,7 +9207,7 @@ def save_synonym_with_http_info( ).replace("{objectID}", quote(str(object_id), safe="")), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9108,7 +9222,7 @@ def save_synonym( object_id: Annotated[ StrictStr, Field(description="Unique identifier of a synonym object.") ], - synonym_hit: SynonymHit, + synonym_hit: Union[SynonymHit, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9143,7 +9257,7 @@ def save_synonyms_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - synonym_hit: List[SynonymHit], + synonym_hit: Union[List[SynonymHit], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9184,14 +9298,12 @@ def save_synonyms_with_http_info( "Parameter `synonym_hit` is required when calling `save_synonyms`." 
) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas if replace_existing_synonyms is not None: - _query_parameters.append( - ("replaceExistingSynonyms", replace_existing_synonyms) - ) + _query_parameters["replaceExistingSynonyms"] = replace_existing_synonyms _data = {} if synonym_hit is not None: @@ -9204,7 +9316,7 @@ def save_synonyms_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9216,7 +9328,7 @@ def save_synonyms( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - synonym_hit: List[SynonymHit], + synonym_hit: Union[List[SynonymHit], list[dict[str, Any]]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9257,11 +9369,14 @@ def save_synonyms( def search_with_http_info( self, - search_method_params: Annotated[ - SearchMethodParams, - Field( - description="Muli-search request body. Results are returned in the same order as the requests." - ), + search_method_params: Union[ + Annotated[ + SearchMethodParams, + Field( + description="Muli-search request body. Results are returned in the same order as the requests." 
+ ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -9290,7 +9405,7 @@ def search_with_http_info( verb=Verb.POST, path="/1/indexes/*/queries", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9298,11 +9413,14 @@ def search_with_http_info( def search( self, - search_method_params: Annotated[ - SearchMethodParams, - Field( - description="Muli-search request body. Results are returned in the same order as the requests." - ), + search_method_params: Union[ + Annotated[ + SearchMethodParams, + Field( + description="Muli-search request body. Results are returned in the same order as the requests." + ), + ], + dict[str, Any], ], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchResponses: @@ -9322,10 +9440,15 @@ def search( def search_dictionary_entries_with_http_info( self, - dictionary_name: Annotated[ - DictionaryType, Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + search_dictionary_entries_params: Union[ + SearchDictionaryEntriesParams, dict[str, Any] ], - search_dictionary_entries_params: SearchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9362,7 +9485,7 @@ def search_dictionary_entries_with_http_info( "{dictionaryName}", quote(str(dictionary_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9370,10 +9493,15 @@ def search_dictionary_entries_with_http_info( def search_dictionary_entries( self, - dictionary_name: Annotated[ - DictionaryType, 
Field(description="Dictionary type in which to search.") + dictionary_name: Union[ + Annotated[ + DictionaryType, Field(description="Dictionary type in which to search.") + ], + str, + ], + search_dictionary_entries_params: Union[ + SearchDictionaryEntriesParams, dict[str, Any] ], - search_dictionary_entries_params: SearchDictionaryEntriesParams, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchDictionaryEntriesResponse: """ @@ -9406,7 +9534,9 @@ def search_for_facet_values_with_http_info( description="Facet attribute in which to search for values. This attribute must be included in the `attributesForFaceting` index setting with the `searchable()` modifier. " ), ], - search_for_facet_values_request: Optional[SearchForFacetValuesRequest] = None, + search_for_facet_values_request: Union[ + Optional[SearchForFacetValuesRequest], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9445,7 +9575,7 @@ def search_for_facet_values_with_http_info( "{indexName}", quote(str(index_name), safe="") ).replace("{facetName}", quote(str(facet_name), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9463,7 +9593,9 @@ def search_for_facet_values( description="Facet attribute in which to search for values. This attribute must be included in the `attributesForFaceting` index setting with the `searchable()` modifier. 
" ), ], - search_for_facet_values_request: Optional[SearchForFacetValuesRequest] = None, + search_for_facet_values_request: Union[ + Optional[SearchForFacetValuesRequest], dict[str, Any] + ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchForFacetValuesResponse: """ @@ -9492,7 +9624,7 @@ def search_rules_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_rules_params: Optional[SearchRulesParams] = None, + search_rules_params: Union[Optional[SearchRulesParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9524,7 +9656,7 @@ def search_rules_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9536,7 +9668,7 @@ def search_rules( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_rules_params: Optional[SearchRulesParams] = None, + search_rules_params: Union[Optional[SearchRulesParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchRulesResponse: """ @@ -9563,7 +9695,7 @@ def search_single_index_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_params: Optional[SearchParams] = None, + search_params: Union[Optional[SearchParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9595,7 +9727,7 @@ def search_single_index_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ 
-9607,7 +9739,7 @@ def search_single_index( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_params: Optional[SearchParams] = None, + search_params: Union[Optional[SearchParams], dict[str, Any]] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchResponse: """ @@ -9634,9 +9766,12 @@ def search_synonyms_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_synonyms_params: Annotated[ - Optional[SearchSynonymsParams], - Field(description="Body of the `searchSynonyms` operation."), + search_synonyms_params: Union[ + Annotated[ + Optional[SearchSynonymsParams], + Field(description="Body of the `searchSynonyms` operation."), + ], + dict[str, Any], ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: @@ -9669,7 +9804,7 @@ def search_synonyms_with_http_info( "{indexName}", quote(str(index_name), safe="") ), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9681,9 +9816,12 @@ def search_synonyms( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - search_synonyms_params: Annotated[ - Optional[SearchSynonymsParams], - Field(description="Body of the `searchSynonyms` operation."), + search_synonyms_params: Union[ + Annotated[ + Optional[SearchSynonymsParams], + Field(description="Body of the `searchSynonyms` operation."), + ], + dict[str, Any], ] = None, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchSynonymsResponse: @@ -9707,7 +9845,7 @@ def search_synonyms( def search_user_ids_with_http_info( self, - search_user_ids_params: SearchUserIdsParams, + search_user_ids_params: Union[SearchUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> 
ApiResponse[str]: """ @@ -9735,7 +9873,7 @@ def search_user_ids_with_http_info( verb=Verb.POST, path="/1/clusters/mapping/search", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=True, @@ -9743,7 +9881,7 @@ def search_user_ids_with_http_info( def search_user_ids( self, - search_user_ids_params: SearchUserIdsParams, + search_user_ids_params: Union[SearchUserIdsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> SearchUserIdsResponse: """ @@ -9764,7 +9902,7 @@ def search_user_ids( def set_dictionary_settings_with_http_info( self, - dictionary_settings_params: DictionarySettingsParams, + dictionary_settings_params: Union[DictionarySettingsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9792,7 +9930,7 @@ def set_dictionary_settings_with_http_info( verb=Verb.PUT, path="/1/dictionaries/*/settings", request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9800,7 +9938,7 @@ def set_dictionary_settings_with_http_info( def set_dictionary_settings( self, - dictionary_settings_params: DictionarySettingsParams, + dictionary_settings_params: Union[DictionarySettingsParams, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdatedAtResponse: """ @@ -9825,7 +9963,7 @@ def set_settings_with_http_info( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - index_settings: IndexSettings, + index_settings: Union[IndexSettings, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9858,10 +9996,10 @@ def set_settings_with_http_info( "Parameter 
`index_settings` is required when calling `set_settings`." ) - _query_parameters: List[Tuple[str, str]] = [] + _query_parameters: Dict[str, Any] = {} if forward_to_replicas is not None: - _query_parameters.append(("forwardToReplicas", forward_to_replicas)) + _query_parameters["forwardToReplicas"] = forward_to_replicas _data = {} if index_settings is not None: @@ -9874,7 +10012,7 @@ def set_settings_with_http_info( ), request_options=self._request_options.merge( query_parameters=_query_parameters, - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9886,7 +10024,7 @@ def set_settings( StrictStr, Field(description="Name of the index on which to perform the operation."), ], - index_settings: IndexSettings, + index_settings: Union[IndexSettings, dict[str, Any]], forward_to_replicas: Annotated[ Optional[StrictBool], Field(description="Whether changes are applied to replica indices."), @@ -9916,7 +10054,7 @@ def set_settings( def update_api_key_with_http_info( self, key: Annotated[StrictStr, Field(description="API key.")], - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> ApiResponse[str]: """ @@ -9951,7 +10089,7 @@ def update_api_key_with_http_info( verb=Verb.PUT, path="/1/keys/{key}".replace("{key}", quote(str(key), safe="")), request_options=self._request_options.merge( - data=dumps(bodySerializer(_data)), + data=dumps(body_serializer(_data)), user_request_options=request_options, ), use_read_transporter=False, @@ -9960,7 +10098,7 @@ def update_api_key_with_http_info( def update_api_key( self, key: Annotated[StrictStr, Field(description="API key.")], - api_key: ApiKey, + api_key: Union[ApiKey, dict[str, Any]], request_options: Optional[Union[dict, RequestOptions]] = None, ) -> UpdateApiKeyResponse: """ diff --git a/algoliasearch/search/config.py b/algoliasearch/search/config.py index 
de703b545..ea2a9e8be 100644 --- a/algoliasearch/search/config.py +++ b/algoliasearch/search/config.py @@ -1,4 +1,5 @@ from os import environ +from typing import Optional from algoliasearch.http.base_config import BaseConfig from algoliasearch.http.hosts import CallType, Host, HostsCollection @@ -6,11 +7,14 @@ class SearchConfig(BaseConfig): - def __init__(self, app_id: str, api_key: str) -> None: + def __init__(self, app_id: Optional[str], api_key: Optional[str]) -> None: super().__init__(app_id, api_key) user_agent = UserAgent().add("Search") + assert app_id, "`app_id` is missing." + assert api_key, "`api_key` is missing." + self.headers = { "x-algolia-application-id": app_id, "x-algolia-api-key": api_key, diff --git a/algoliasearch/search/models/add_api_key_response.py b/algoliasearch/search/models/add_api_key_response.py index fa2a4b326..992647ba4 100644 --- a/algoliasearch/search/models/add_api_key_response.py +++ b/algoliasearch/search/models/add_api_key_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "key": "key", + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AddApiKeyResponse(BaseModel): """ AddApiKeyResponse """ - key: str = Field(alias="key") + key: str """ API key. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date and time when the object was created, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class AddApiKeyResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/api_key.py b/algoliasearch/search/models/api_key.py index d08aeb17d..042e1c613 100644 --- a/algoliasearch/search/models/api_key.py +++ b/algoliasearch/search/models/api_key.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,29 +20,42 @@ from algoliasearch.search.models.acl import Acl +_ALIASES = { + "acl": "acl", + "description": "description", + "indexes": "indexes", + "max_hits_per_query": "maxHitsPerQuery", + "max_queries_per_ip_per_hour": "maxQueriesPerIPPerHour", + "query_parameters": "queryParameters", + "referers": "referers", + "validity": "validity", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ApiKey(BaseModel): """ API key object. """ - acl: List[Acl] = Field(alias="acl") + acl: List[Acl] """ Permissions that determine the type of API requests this key can make. The required ACL is listed in each endpoint's reference. For more information, see [access control list](https://www.algolia.com/doc/guides/security/api-keys/#access-control-list-acl). """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ Description of an API key to help you identify this API key. """ - indexes: Optional[List[str]] = Field(default=None, alias="indexes") + indexes: Optional[List[str]] = None """ Index names or patterns that this API key can access. By default, an API key can access all indices in the same application. 
You can use leading and trailing wildcard characters (`*`): - `dev_*` matches all indices starting with \"dev_\". - `*_dev` matches all indices ending with \"_dev\". - `*_products_*` matches all indices containing \"_products_\". """ - max_hits_per_query: Optional[int] = Field(default=None, alias="maxHitsPerQuery") + max_hits_per_query: Optional[int] = None """ Maximum number of results this API key can retrieve in one query. By default, there's no limit. """ - max_queries_per_ip_per_hour: Optional[int] = Field( - default=None, alias="maxQueriesPerIPPerHour" - ) + max_queries_per_ip_per_hour: Optional[int] = None """ Maximum number of API requests allowed per IP address or [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/) per hour. If this limit is reached, the API returns an error with status code `429`. By default, there's no limit. """ - query_parameters: Optional[str] = Field(default=None, alias="queryParameters") + query_parameters: Optional[str] = None """ Query parameters to add when making API requests with this API key. To restrict this API key to specific IP addresses, add the `restrictSources` parameter. You can only add a single source, but you can provide a range of IP addresses. Creating an API key fails if the request is made from an IP address that's outside the restricted range. """ - referers: Optional[List[str]] = Field(default=None, alias="referers") + referers: Optional[List[str]] = None """ Allowed HTTP referrers for this API key. By default, all referrers are allowed. You can use leading and trailing wildcard characters (`*`): - `https://algolia.com/*` allows all referrers starting with \"https://algolia.com/\" - `*.algolia.com` allows all referrers ending with \".algolia.com\" - `*algolia.com*` allows all referrers in the domain \"algolia.com\". Like all HTTP headers, referrers can be spoofed. Don't rely on them to secure your data. 
For more information, see [HTTP referrer restrictions](https://www.algolia.com/doc/guides/security/security-best-practices/#http-referrers-restrictions). """ - validity: Optional[int] = Field(default=None, alias="validity") + validity: Optional[int] = None """ Duration (in seconds) after which the API key expires. By default, API keys don't expire. """ model_config = ConfigDict( @@ -50,6 +63,7 @@ class ApiKey(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/around_precision.py b/algoliasearch/search/models/around_precision.py index a1be2c4af..f18767532 100644 --- a/algoliasearch/search/models/around_precision.py +++ b/algoliasearch/search/models/around_precision.py @@ -30,7 +30,7 @@ class AroundPrecision(BaseModel): """ Distance in meters to group results by similar distances. For example, if you set `aroundPrecision` to 100, records wihin 100 meters to the central coordinate are considered to have the same distance, as are records between 100 and 199 meters. """ oneof_schema_2_validator: Optional[List[Range]] = Field(default=None) - actual_instance: Optional[Union[List[Range], int]] = None + actual_instance: Union[List[Range], int, None] = None one_of_schemas: Set[str] = {"List[Range]", "int"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[Range], int]]: + def unwrap_actual_instance(self) -> Union[List[Range], int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -91,9 +91,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -103,8 +103,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[Range], int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/around_radius.py b/algoliasearch/search/models/around_radius.py index 73bfa99b8..e1f38c58d 100644 --- a/algoliasearch/search/models/around_radius.py +++ b/algoliasearch/search/models/around_radius.py @@ -30,7 +30,7 @@ class AroundRadius(BaseModel): """ Maximum search radius around a central location in meters. """ oneof_schema_2_validator: Optional[AroundRadiusAll] = Field(default=None) - actual_instance: Optional[Union[AroundRadiusAll, int]] = None + actual_instance: Union[AroundRadiusAll, int, None] = None one_of_schemas: Set[str] = {"AroundRadiusAll", "int"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[AroundRadiusAll, int]]: + def unwrap_actual_instance(self) -> Union[AroundRadiusAll, int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], AroundRadiusAll, int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/assign_user_id_params.py b/algoliasearch/search/models/assign_user_id_params.py index 77cd64bec..2d0fcae31 100644 --- a/algoliasearch/search/models/assign_user_id_params.py +++ b/algoliasearch/search/models/assign_user_id_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "cluster": "cluster", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AssignUserIdParams(BaseModel): """ Assign userID parameters. """ - cluster: str = Field(alias="cluster") + cluster: str """ Cluster name. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class AssignUserIdParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/attribute_to_update.py b/algoliasearch/search/models/attribute_to_update.py index 63608a107..5e7ce7099 100644 --- a/algoliasearch/search/models/attribute_to_update.py +++ b/algoliasearch/search/models/attribute_to_update.py @@ -30,7 +30,7 @@ class AttributeToUpdate(BaseModel): oneof_schema_2_validator: Optional[BuiltInOperation] = Field(default=None) - actual_instance: Optional[Union[BuiltInOperation, str]] = None + actual_instance: Union[BuiltInOperation, str, None] = None one_of_schemas: Set[str] = {"BuiltInOperation", "str"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[BuiltInOperation, str]]: + def unwrap_actual_instance(self) -> Union[BuiltInOperation, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], BuiltInOperation, str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/automatic_facet_filter.py b/algoliasearch/search/models/automatic_facet_filter.py index 5a2610015..81bf34697 100644 --- a/algoliasearch/search/models/automatic_facet_filter.py +++ b/algoliasearch/search/models/automatic_facet_filter.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "facet": "facet", + "score": "score", + "disjunctive": "disjunctive", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class AutomaticFacetFilter(BaseModel): """ Filter or optional filter to be applied to the search. """ - facet: str = Field(alias="facet") + facet: str """ Facet name to be applied as filter. The name must match placeholders in the `pattern` parameter. For example, with `pattern: {facet:genre}`, `automaticFacetFilters` must be `genre`. 
""" - score: Optional[int] = Field(default=None, alias="score") + score: Optional[int] = None """ Filter scores to give different weights to individual filters. """ - disjunctive: Optional[bool] = Field(default=None, alias="disjunctive") + disjunctive: Optional[bool] = None """ Whether the filter is disjunctive or conjunctive. If true the filter has multiple matches, multiple occurences are combined with the logical `OR` operation. If false, multiple occurences are combined with the logical `AND` operation. """ model_config = ConfigDict( @@ -35,6 +46,7 @@ class AutomaticFacetFilter(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/automatic_facet_filters.py b/algoliasearch/search/models/automatic_facet_filters.py index 9e408c85e..b3b04a2bc 100644 --- a/algoliasearch/search/models/automatic_facet_filters.py +++ b/algoliasearch/search/models/automatic_facet_filters.py @@ -30,7 +30,7 @@ class AutomaticFacetFilters(BaseModel): oneof_schema_2_validator: Optional[List[str]] = Field(default=None) - actual_instance: Optional[Union[List[AutomaticFacetFilter], List[str]]] = None + actual_instance: Union[List[AutomaticFacetFilter], List[str], None] = None one_of_schemas: Set[str] = {"List[AutomaticFacetFilter]", "List[str]"} def __init__(self, *args, **kwargs) -> None: @@ -43,14 +43,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[List[AutomaticFacetFilter], List[str]]]: + ) -> Union[List[AutomaticFacetFilter], List[str], Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -93,9 +93,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -107,8 +107,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/banner.py b/algoliasearch/search/models/banner.py new file mode 100644 index 000000000..ff76f7791 --- /dev/null +++ b/algoliasearch/search/models/banner.py @@ -0,0 +1,84 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.search.models.banner_image import BannerImage +from algoliasearch.search.models.banner_link import BannerLink + +_ALIASES = { + "image": "image", + "link": "link", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Banner(BaseModel): + """ + a search banner with image and url. 
+ """ + + image: Optional[BannerImage] = None + link: Optional[BannerLink] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Banner from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Banner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["image"] = ( + BannerImage.from_dict(obj["image"]) + if obj.get("image") is not None + else None + ) + obj["link"] = ( + BannerLink.from_dict(obj["link"]) if obj.get("link") is not None else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/models/banner_image.py b/algoliasearch/search/models/banner_image.py new file mode 100644 index 000000000..bcc62198b --- /dev/null +++ b/algoliasearch/search/models/banner_image.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. 
+""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.search.models.banner_image_url import BannerImageUrl + +_ALIASES = { + "urls": "urls", + "title": "title", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerImage(BaseModel): + """ + image of a search banner. + """ + + urls: Optional[BannerImageUrl] = None + title: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerImage from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerImage from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["urls"] = ( + BannerImageUrl.from_dict(obj["urls"]) + if obj.get("urls") is not None + else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/models/banner_image_url.py b/algoliasearch/search/models/banner_image_url.py new file mode 100644 index 000000000..630f01a1f --- /dev/null +++ b/algoliasearch/search/models/banner_image_url.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator 
(https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerImageUrl(BaseModel): + """ + url for a search banner image. + """ + + url: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerImageUrl from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerImageUrl from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/models/banner_link.py b/algoliasearch/search/models/banner_link.py new file mode 100644 index 000000000..d1edada9a --- /dev/null +++ b/algoliasearch/search/models/banner_link.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on 
https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class BannerLink(BaseModel): + """ + link for a banner defined in merchandising studio. + """ + + url: Optional[str] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of BannerLink from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of BannerLink from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/models/banners.py b/algoliasearch/search/models/banners.py new file mode 100644 index 000000000..fda833736 --- /dev/null +++ b/algoliasearch/search/models/banners.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. 
+""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.search.models.banner import Banner + +_ALIASES = { + "banners": "banners", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Banners(BaseModel): + """ + banners defined in the merchandising studio for the given search. + """ + + banners: Optional[Banner] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Banners from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Banners from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["banners"] = ( + Banner.from_dict(obj["banners"]) if obj.get("banners") is not None else None + ) + + return cls.model_validate(obj) diff --git a/algoliasearch/search/models/batch_assign_user_ids_params.py b/algoliasearch/search/models/batch_assign_user_ids_params.py index a89349963..211b33c81 100644 --- a/algoliasearch/search/models/batch_assign_user_ids_params.py +++ b/algoliasearch/search/models/batch_assign_user_ids_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing 
import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "cluster": "cluster", + "users": "users", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class BatchAssignUserIdsParams(BaseModel): """ Assign userID parameters. """ - cluster: str = Field(alias="cluster") + cluster: str """ Cluster name. """ - users: List[str] = Field(alias="users") + users: List[str] """ User IDs to assign. """ model_config = ConfigDict( @@ -33,6 +43,7 @@ class BatchAssignUserIdsParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/batch_dictionary_entries_params.py b/algoliasearch/search/models/batch_dictionary_entries_params.py index fea54a98a..0b18b99ae 100644 --- a/algoliasearch/search/models/batch_dictionary_entries_params.py +++ b/algoliasearch/search/models/batch_dictionary_entries_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,17 +22,24 @@ BatchDictionaryEntriesRequest, ) +_ALIASES = { + "clear_existing_dictionary_entries": "clearExistingDictionaryEntries", + "requests": "requests", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BatchDictionaryEntriesParams(BaseModel): """ Request body for updating dictionary entries. 
""" - clear_existing_dictionary_entries: Optional[bool] = Field( - default=None, alias="clearExistingDictionaryEntries" - ) + clear_existing_dictionary_entries: Optional[bool] = None """ Whether to replace all custom entries in the dictionary with the ones sent with this request. """ - requests: List[BatchDictionaryEntriesRequest] = Field(alias="requests") + requests: List[BatchDictionaryEntriesRequest] """ List of additions and deletions to your dictionaries. """ model_config = ConfigDict( @@ -40,6 +47,7 @@ class BatchDictionaryEntriesParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/batch_dictionary_entries_request.py b/algoliasearch/search/models/batch_dictionary_entries_request.py index 491af947a..fad1d5de1 100644 --- a/algoliasearch/search/models/batch_dictionary_entries_request.py +++ b/algoliasearch/search/models/batch_dictionary_entries_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.search.models.dictionary_action import DictionaryAction from algoliasearch.search.models.dictionary_entry import DictionaryEntry +_ALIASES = { + "action": "action", + "body": "body", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BatchDictionaryEntriesRequest(BaseModel): """ BatchDictionaryEntriesRequest """ - action: DictionaryAction = Field(alias="action") - body: DictionaryEntry = Field(alias="body") + action: DictionaryAction + body: DictionaryEntry model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git 
a/algoliasearch/search/models/batch_params.py b/algoliasearch/search/models/batch_params.py index 63e961f4d..175d1a773 100644 --- a/algoliasearch/search/models/batch_params.py +++ b/algoliasearch/search/models/batch_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.multiple_batch_request import MultipleBatchRequest +_ALIASES = { + "requests": "requests", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BatchParams(BaseModel): """ Batch parameters. """ - requests: List[MultipleBatchRequest] = Field(alias="requests") + requests: List[MultipleBatchRequest] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/batch_request.py b/algoliasearch/search/models/batch_request.py index 22ff93915..2930adb3d 100644 --- a/algoliasearch/search/models/batch_request.py +++ b/algoliasearch/search/models/batch_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,14 +20,23 @@ from algoliasearch.search.models.action import Action +_ALIASES = { + "action": "action", + "body": "body", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BatchRequest(BaseModel): """ BatchRequest """ - action: Action = Field(alias="action") - body: object = Field(alias="body") + action: Action + body: object """ Operation arguments (varies with specified `action`). 
""" model_config = ConfigDict( @@ -35,6 +44,7 @@ class BatchRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/batch_response.py b/algoliasearch/search/models/batch_response.py index cd2b01324..fec36c51a 100644 --- a/algoliasearch/search/models/batch_response.py +++ b/algoliasearch/search/models/batch_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "object_ids": "objectIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class BatchResponse(BaseModel): """ BatchResponse """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Unique record identifiers. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class BatchResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/batch_write_params.py b/algoliasearch/search/models/batch_write_params.py index 7413ab13e..f8f0b629d 100644 --- a/algoliasearch/search/models/batch_write_params.py +++ b/algoliasearch/search/models/batch_write_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.batch_request import BatchRequest +_ALIASES = { + "requests": "requests", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BatchWriteParams(BaseModel): """ Batch parameters. """ - requests: List[BatchRequest] = Field(alias="requests") + requests: List[BatchRequest] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/browse_params.py b/algoliasearch/search/models/browse_params.py index e05bfb68a..9c040fa6f 100644 --- a/algoliasearch/search/models/browse_params.py +++ b/algoliasearch/search/models/browse_params.py @@ -31,7 +31,7 @@ class BrowseParams(BaseModel): oneof_schema_2_validator: Optional[BrowseParamsObject] = Field(default=None) - actual_instance: Optional[Union[BrowseParamsObject, SearchParamsString]] = None + actual_instance: Union[BrowseParamsObject, SearchParamsString, None] = None one_of_schemas: Set[str] = {"BrowseParamsObject", "SearchParamsString"} def __init__(self, *args, **kwargs) -> None: @@ -44,14 +44,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a 
position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[BrowseParamsObject, SearchParamsString]]: + ) -> Union[BrowseParamsObject, SearchParamsString, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -92,9 +92,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -106,8 +106,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/browse_params_object.py b/algoliasearch/search/models/browse_params_object.py index 5e6e03b9e..a4299111a 100644 --- a/algoliasearch/search/models/browse_params_object.py +++ b/algoliasearch/search/models/browse_params_object.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -43,222 +43,232 @@ from algoliasearch.search.models.tag_filters import TagFilters from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "query": "query", + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + 
"numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "page": "page", + "offset": "offset", + "length": "length", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", + "enable_ab_test": "enableABTest", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": 
"queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", + "cursor": "cursor", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BrowseParamsObject(BaseModel): """ BrowseParamsObject """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. 
""" - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). 
""" - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). """ - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. 
This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - offset: Optional[int] = Field(default=None, alias="offset") + offset: Optional[int] = None """ Position of the first hit to retrieve. """ - length: Optional[int] = Field(default=None, alias="length") + length: Optional[int] = None """ Number of hits to retrieve (used in combination with `offset`). """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. 
""" - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. """ - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. 
[Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. """ - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. 
""" - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. 
Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. 
""" - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. 
""" - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. 
""" - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. """ - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. 
This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. 
""" - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. """ - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. 
For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. - If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. 
""" - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. 
For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. 
""" - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). """ - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) - cursor: Optional[str] = Field(default=None, alias="cursor") + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None + cursor: Optional[str] = None """ Cursor to get the next page of the response. The parameter must match the value returned in the response of a previous request. The last page of the response does not return a `cursor` attribute. """ model_config = ConfigDict( @@ -266,6 +276,7 @@ class BrowseParamsObject(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/browse_response.py b/algoliasearch/search/models/browse_response.py index 5cc1e9bc4..85535869d 100644 --- a/algoliasearch/search/models/browse_response.py +++ b/algoliasearch/search/models/browse_response.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -25,82 +25,114 @@ from algoliasearch.search.models.redirect import Redirect from algoliasearch.search.models.rendering_content import RenderingContent +_ALIASES = { + "ab_test_id": "abTestID", + "ab_test_variant_id": "abTestVariantID", + "around_lat_lng": "aroundLatLng", + "automatic_radius": "automaticRadius", + "exhaustive": "exhaustive", + "exhaustive_facets_count": "exhaustiveFacetsCount", + "exhaustive_nb_hits": "exhaustiveNbHits", + "exhaustive_typo": "exhaustiveTypo", + "facets": "facets", + "facets_stats": "facets_stats", + "index": "index", + "index_used": "indexUsed", + "message": "message", + "nb_sorted_hits": "nbSortedHits", + "parsed_query": "parsedQuery", + "processing_time_ms": "processingTimeMS", + "processing_timings_ms": 
"processingTimingsMS", + "query_after_removal": "queryAfterRemoval", + "redirect": "redirect", + "rendering_content": "renderingContent", + "server_time_ms": "serverTimeMS", + "server_used": "serverUsed", + "user_data": "userData", + "query_id": "queryID", + "automatic_insights": "_automaticInsights", + "page": "page", + "nb_hits": "nbHits", + "nb_pages": "nbPages", + "hits_per_page": "hitsPerPage", + "hits": "hits", + "query": "query", + "params": "params", + "cursor": "cursor", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BrowseResponse(BaseModel): """ BrowseResponse """ - ab_test_id: Optional[int] = Field(default=None, alias="abTestID") + ab_test_id: Optional[int] = None """ A/B test ID. This is only included in the response for indices that are part of an A/B test. """ - ab_test_variant_id: Optional[int] = Field(default=None, alias="abTestVariantID") + ab_test_variant_id: Optional[int] = None """ Variant ID. This is only included in the response for indices that are part of an A/B test. """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Computed geographical location. """ - automatic_radius: Optional[str] = Field(default=None, alias="automaticRadius") + automatic_radius: Optional[str] = None """ Distance from a central coordinate provided by `aroundLatLng`. """ - exhaustive: Optional[Exhaustive] = Field(default=None, alias="exhaustive") - exhaustive_facets_count: Optional[bool] = Field( - default=None, alias="exhaustiveFacetsCount" - ) + exhaustive: Optional[Exhaustive] = None + exhaustive_facets_count: Optional[bool] = None """ See the `facetsCount` field of the `exhaustive` object in the response. """ - exhaustive_nb_hits: Optional[bool] = Field(default=None, alias="exhaustiveNbHits") + exhaustive_nb_hits: Optional[bool] = None """ See the `nbHits` field of the `exhaustive` object in the response. 
""" - exhaustive_typo: Optional[bool] = Field(default=None, alias="exhaustiveTypo") + exhaustive_typo: Optional[bool] = None """ See the `typo` field of the `exhaustive` object in the response. """ - facets: Optional[Dict[str, Dict[str, int]]] = Field(default=None, alias="facets") + facets: Optional[Dict[str, Dict[str, int]]] = None """ Facet counts. """ - facets_stats: Optional[Dict[str, FacetStats]] = Field( - default=None, alias="facets_stats" - ) + facets_stats: Optional[Dict[str, FacetStats]] = None """ Statistics for numerical facets. """ - index: Optional[str] = Field(default=None, alias="index") + index: Optional[str] = None """ Index name used for the query. """ - index_used: Optional[str] = Field(default=None, alias="indexUsed") + index_used: Optional[str] = None """ Index name used for the query. During A/B testing, the targeted index isn't always the index used by the query. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ Warnings about the query. """ - nb_sorted_hits: Optional[int] = Field(default=None, alias="nbSortedHits") + nb_sorted_hits: Optional[int] = None """ Number of hits selected and sorted by the relevant sort algorithm. """ - parsed_query: Optional[str] = Field(default=None, alias="parsedQuery") + parsed_query: Optional[str] = None """ Post-[normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/#what-does-normalization-mean) query string that will be searched. """ - processing_time_ms: int = Field(alias="processingTimeMS") + processing_time_ms: int """ Time the server took to process the request, in milliseconds. """ - processing_timings_ms: Optional[object] = Field( - default=None, alias="processingTimingsMS" - ) + processing_timings_ms: Optional[object] = None """ Experimental. List of processing steps and their times, in milliseconds. You can use this list to investigate performance issues. 
""" - query_after_removal: Optional[str] = Field(default=None, alias="queryAfterRemoval") + query_after_removal: Optional[str] = None """ Markup text indicating which parts of the original query have been removed to retrieve a non-empty result set. """ - redirect: Optional[Redirect] = Field(default=None, alias="redirect") - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - server_time_ms: Optional[int] = Field(default=None, alias="serverTimeMS") + redirect: Optional[Redirect] = None + rendering_content: Optional[RenderingContent] = None + server_time_ms: Optional[int] = None """ Time the server took to process the request, in milliseconds. """ - server_used: Optional[str] = Field(default=None, alias="serverUsed") + server_used: Optional[str] = None """ Host name of the server that processed the request. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - query_id: Optional[str] = Field(default=None, alias="queryID") + query_id: Optional[str] = None """ Unique identifier for the query. This is used for [click analytics](https://www.algolia.com/doc/guides/analytics/click-analytics/). """ - automatic_insights: Optional[bool] = Field(default=None, alias="_automaticInsights") + automatic_insights: Optional[bool] = None """ Whether automatic events collection is enabled for the application. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - nb_hits: Optional[int] = Field(default=None, alias="nbHits") + nb_hits: Optional[int] = None """ Number of results (hits). """ - nb_pages: Optional[int] = Field(default=None, alias="nbPages") + nb_pages: Optional[int] = None """ Number of pages of results. 
""" - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - hits: List[Hit] = Field(alias="hits") + hits: List[Hit] """ Search results (hits). Hits are records from your index that match the search criteria, augmented with additional attributes, such as, for highlighting. """ - query: str = Field(alias="query") + query: str """ Search query. """ - params: str = Field(alias="params") + params: str """ URL-encoded string of all search parameters. """ - cursor: Optional[str] = Field(default=None, alias="cursor") + cursor: Optional[str] = None """ Cursor to get the next page of the response. The parameter must match the value returned in the response of a previous request. The last page of the response does not return a `cursor` attribute. """ @field_validator("around_lat_lng") @@ -120,6 +152,7 @@ def around_lat_lng_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/built_in_operation.py b/algoliasearch/search/models/built_in_operation.py index ec44b22cb..03b06e49c 100644 --- a/algoliasearch/search/models/built_in_operation.py +++ b/algoliasearch/search/models/built_in_operation.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.search.models.built_in_operation_type import BuiltInOperationType from algoliasearch.search.models.built_in_operation_value import BuiltInOperationValue +_ALIASES = { + "operation": "_operation", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class BuiltInOperation(BaseModel): """ Update to perform on the 
attribute. """ - operation: BuiltInOperationType = Field(alias="_operation") - value: BuiltInOperationValue = Field(alias="value") + operation: BuiltInOperationType + value: BuiltInOperationValue model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/built_in_operation_value.py b/algoliasearch/search/models/built_in_operation_value.py index 78a1fa671..20cfdf5db 100644 --- a/algoliasearch/search/models/built_in_operation_value.py +++ b/algoliasearch/search/models/built_in_operation_value.py @@ -27,7 +27,7 @@ class BuiltInOperationValue(BaseModel): """ A string to append or remove for the `Add`, `Remove`, and `AddUnique` operations. """ oneof_schema_2_validator: Optional[int] = Field(default=None) """ A number to add, remove, or append, depending on the operation. """ - actual_instance: Optional[Union[int, str]] = None + actual_instance: Union[int, str, None] = None one_of_schemas: Set[str] = {"int", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[int, str]]: + def unwrap_actual_instance(self) -> Union[int, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], int, str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/condition.py b/algoliasearch/search/models/condition.py index d816975bd..3d408c1b8 100644 --- a/algoliasearch/search/models/condition.py +++ b/algoliasearch/search/models/condition.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,20 +21,32 @@ from algoliasearch.search.models.anchoring import Anchoring +_ALIASES = { + "pattern": "pattern", + "anchoring": "anchoring", + "alternatives": "alternatives", + "context": "context", + "filters": "filters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Condition(BaseModel): """ Condition """ - pattern: Optional[str] = Field(default=None, alias="pattern") + pattern: Optional[str] = None """ Query pattern that triggers the rule. You can use either a literal string, or a special pattern `{facet:ATTRIBUTE}`, where `ATTRIBUTE` is a facet name. The rule is triggered if the query matches the literal string or a value of the specified facet. 
For example, with `pattern: {facet:genre}`, the rule is triggered when users search for a genre, such as \"comedy\". """ - anchoring: Optional[Anchoring] = Field(default=None, alias="anchoring") - alternatives: Optional[bool] = Field(default=None, alias="alternatives") + anchoring: Optional[Anchoring] = None + alternatives: Optional[bool] = None """ Whether the pattern should match plurals, synonyms, and typos. """ - context: Optional[str] = Field(default=None, alias="context") + context: Optional[str] = None """ An additional restriction that only triggers the rule, when the search has the same value as `ruleContexts` parameter. For example, if `context: mobile`, the rule is only triggered when the search request has a matching `ruleContexts: mobile`. A rule context must only contain alphanumeric characters. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filters that trigger the rule. You can add add filters using the syntax `facet:value` so that the rule is triggered, when the specific filter is selected. You can use `filters` on its own or combine it with the `pattern` parameter. 
""" @field_validator("context") @@ -52,6 +64,7 @@ def context_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/consequence.py b/algoliasearch/search/models/consequence.py index 5001b1ef0..093785330 100644 --- a/algoliasearch/search/models/consequence.py +++ b/algoliasearch/search/models/consequence.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,20 +22,32 @@ from algoliasearch.search.models.consequence_params import ConsequenceParams from algoliasearch.search.models.promote import Promote +_ALIASES = { + "params": "params", + "promote": "promote", + "filter_promotes": "filterPromotes", + "hide": "hide", + "user_data": "userData", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Consequence(BaseModel): """ Effect of the rule. For more information, see [Consequences](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/#consequences). """ - params: Optional[ConsequenceParams] = Field(default=None, alias="params") - promote: Optional[List[Promote]] = Field(default=None, alias="promote") + params: Optional[ConsequenceParams] = None + promote: Optional[List[Promote]] = None """ Records you want to pin to a specific position in the search results. You can promote up to 300 records, either individually, or as groups of up to 100 records each. """ - filter_promotes: Optional[bool] = Field(default=None, alias="filterPromotes") + filter_promotes: Optional[bool] = None """ Whether promoted records must match an active filter for the consequence to be applied. This ensures that user actions (filtering the search) are given a higher precendence. 
For example, if you promote a record with the `color: red` attribute, and the user filters the search for `color: blue`, the \"red\" record won't be shown. """ - hide: Optional[List[ConsequenceHide]] = Field(default=None, alias="hide") + hide: Optional[List[ConsequenceHide]] = None """ Records you want to hide from the search results. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ A JSON object with custom data that will be appended to the `userData` array in the response. This object isn't interpreted by the API and is limited to 1 kB of minified JSON. """ model_config = ConfigDict( @@ -43,6 +55,7 @@ class Consequence(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/consequence_hide.py b/algoliasearch/search/models/consequence_hide.py index 47d0fb64a..d9476f18f 100644 --- a/algoliasearch/search/models/consequence_hide.py +++ b/algoliasearch/search/models/consequence_hide.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ConsequenceHide(BaseModel): """ Object ID of the record to hide. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique record identifier. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class ConsequenceHide(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/consequence_params.py b/algoliasearch/search/models/consequence_params.py index 6770b9b07..0e7ae5444 100644 --- a/algoliasearch/search/models/consequence_params.py +++ b/algoliasearch/search/models/consequence_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -45,232 +45,240 @@ from algoliasearch.search.models.tag_filters import TagFilters from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "page": "page", + "offset": "offset", + "length": "length", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": 
"percentileComputation", + "enable_ab_test": "enableABTest", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": 
"attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", + "query": "query", + "automatic_facet_filters": "automaticFacetFilters", + "automatic_optional_facet_filters": "automaticOptionalFacetFilters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConsequenceParams(BaseModel): """ ConsequenceParams """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. 
**Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). """ - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. 
""" - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - offset: Optional[int] = Field(default=None, alias="offset") + offset: Optional[int] = None """ Position of the first hit to retrieve. """ - length: Optional[int] = Field(default=None, alias="length") + length: Optional[int] = None """ Number of hits to retrieve (used in combination with `offset`). """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. 
""" - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. """ - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. 
""" - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. [Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. 
""" - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. """ - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. 
- The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. 
If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. 
You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. 
For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. 
""" - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. 
- If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. """ - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. 
Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. 
For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. """ - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). 
""" - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) - query: Optional[ConsequenceQuery] = Field(default=None, alias="query") - automatic_facet_filters: Optional[AutomaticFacetFilters] = Field( - default=None, alias="automaticFacetFilters" - ) - automatic_optional_facet_filters: Optional[AutomaticFacetFilters] = Field( - default=None, alias="automaticOptionalFacetFilters" - ) + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None + query: Optional[ConsequenceQuery] = None + automatic_facet_filters: Optional[AutomaticFacetFilters] = None + automatic_optional_facet_filters: Optional[AutomaticFacetFilters] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/consequence_query.py b/algoliasearch/search/models/consequence_query.py index 9c7537290..d3d3cac8b 100644 --- a/algoliasearch/search/models/consequence_query.py +++ b/algoliasearch/search/models/consequence_query.py @@ -30,7 +30,7 @@ class ConsequenceQuery(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[ConsequenceQueryObject, str]] = None + actual_instance: Union[ConsequenceQueryObject, str, None] = None one_of_schemas: Set[str] = {"ConsequenceQueryObject", "str"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[ConsequenceQueryObject, str]]: + def unwrap_actual_instance(self) -> Union[ConsequenceQueryObject, str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], ConsequenceQueryObject, str] return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/consequence_query_object.py b/algoliasearch/search/models/consequence_query_object.py index 044076949..8f3ae2e0c 100644 --- a/algoliasearch/search/models/consequence_query_object.py +++ b/algoliasearch/search/models/consequence_query_object.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.search.models.edit import Edit +_ALIASES = { + "remove": "remove", + "edits": "edits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ConsequenceQueryObject(BaseModel): """ ConsequenceQueryObject """ - remove: 
Optional[List[str]] = Field(default=None, alias="remove") + remove: Optional[List[str]] = None """ Words to remove from the search query. """ - edits: Optional[List[Edit]] = Field(default=None, alias="edits") + edits: Optional[List[Edit]] = None """ Changes to make to the search query. """ model_config = ConfigDict( @@ -36,6 +45,7 @@ class ConsequenceQueryObject(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/created_at_response.py b/algoliasearch/search/models/created_at_response.py index f7e63ab05..3f15458ba 100644 --- a/algoliasearch/search/models/created_at_response.py +++ b/algoliasearch/search/models/created_at_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "created_at": "createdAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class CreatedAtResponse(BaseModel): """ Response and creation timestamp. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date and time when the object was created, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class CreatedAtResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/delete_api_key_response.py b/algoliasearch/search/models/delete_api_key_response.py index ad6459f83..001ae70fb 100644 --- a/algoliasearch/search/models/delete_api_key_response.py +++ b/algoliasearch/search/models/delete_api_key_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeleteApiKeyResponse(BaseModel): """ DeleteApiKeyResponse """ - deleted_at: str = Field(alias="deletedAt") + deleted_at: str """ Date and time when the object was deleted, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class DeleteApiKeyResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/delete_by_params.py b/algoliasearch/search/models/delete_by_params.py index d4589d219..27178a2b1 100644 --- a/algoliasearch/search/models/delete_by_params.py +++ b/algoliasearch/search/models/delete_by_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -23,29 +23,38 @@ from algoliasearch.search.models.numeric_filters import NumericFilters from algoliasearch.search.models.tag_filters import TagFilters +_ALIASES = { + "facet_filters": "facetFilters", + "filters": "filters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "around_lat_lng": "aroundLatLng", + "around_radius": "aroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DeleteByParams(BaseModel): """ DeleteByParams """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - filters: Optional[str] = Field(default=None, alias="filters") + facet_filters: Optional[FacetFilters] = None + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). 
- **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + around_radius: Optional[AroundRadius] = None + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. 
Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. """ model_config = ConfigDict( @@ -53,6 +62,7 @@ class DeleteByParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/delete_source_response.py b/algoliasearch/search/models/delete_source_response.py index 9cade06a0..ac9999b42 100644 --- a/algoliasearch/search/models/delete_source_response.py +++ b/algoliasearch/search/models/delete_source_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeleteSourceResponse(BaseModel): """ DeleteSourceResponse """ - deleted_at: str = 
Field(alias="deletedAt") + deleted_at: str """ Date and time when the object was deleted, in RFC 3339 format. """ model_config = ConfigDict( @@ -31,6 +40,7 @@ class DeleteSourceResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/deleted_at_response.py b/algoliasearch/search/models/deleted_at_response.py index 5de986f76..c720da065 100644 --- a/algoliasearch/search/models/deleted_at_response.py +++ b/algoliasearch/search/models/deleted_at_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DeletedAtResponse(BaseModel): """ Response, taskID, and deletion timestamp. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - deleted_at: str = Field(alias="deletedAt") + deleted_at: str """ Date and time when the object was deleted, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class DeletedAtResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/dictionary_entry.py b/algoliasearch/search/models/dictionary_entry.py index dffa8b31b..3efc5d39e 100644 --- a/algoliasearch/search/models/dictionary_entry.py +++ b/algoliasearch/search/models/dictionary_entry.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,29 +22,44 @@ from algoliasearch.search.models.dictionary_entry_type import DictionaryEntryType from algoliasearch.search.models.supported_language import SupportedLanguage +_ALIASES = { + "object_id": "objectID", + "language": "language", + "word": "word", + "words": "words", + "decomposition": "decomposition", + "state": "state", + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DictionaryEntry(BaseModel): """ Dictionary entry. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique identifier for the dictionary entry. """ - language: Optional[SupportedLanguage] = Field(default=None, alias="language") - word: Optional[str] = Field(default=None, alias="word") + language: Optional[SupportedLanguage] = None + word: Optional[str] = None """ Matching dictionary word for `stopwords` and `compounds` dictionaries. """ - words: Optional[List[str]] = Field(default=None, alias="words") + words: Optional[List[str]] = None """ Matching words in the `plurals` dictionary including declensions. """ - decomposition: Optional[List[str]] = Field(default=None, alias="decomposition") + decomposition: Optional[List[str]] = None """ Invividual components of a compound word in the `compounds` dictionary. 
""" - state: Optional[DictionaryEntryState] = Field(default=None, alias="state") - type: Optional[DictionaryEntryType] = Field(default=None, alias="type") + state: Optional[DictionaryEntryState] = None + type: Optional[DictionaryEntryType] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/search/models/dictionary_language.py b/algoliasearch/search/models/dictionary_language.py index 5d8781715..fbb98790c 100644 --- a/algoliasearch/search/models/dictionary_language.py +++ b/algoliasearch/search/models/dictionary_language.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "nb_custom_entries": "nbCustomEntries", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class DictionaryLanguage(BaseModel): """ Dictionary type. If `null`, this dictionary type isn't supported for the language. """ - nb_custom_entries: Optional[int] = Field(default=None, alias="nbCustomEntries") + nb_custom_entries: Optional[int] = None """ Number of custom dictionary entries. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class DictionaryLanguage(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/dictionary_settings_params.py b/algoliasearch/search/models/dictionary_settings_params.py index 4e32be5e7..37199ead8 100644 --- a/algoliasearch/search/models/dictionary_settings_params.py +++ b/algoliasearch/search/models/dictionary_settings_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.standard_entries import StandardEntries +_ALIASES = { + "disable_standard_entries": "disableStandardEntries", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class DictionarySettingsParams(BaseModel): """ Turn on or off the built-in Algolia stop words for a specific language. """ - disable_standard_entries: StandardEntries = Field(alias="disableStandardEntries") + disable_standard_entries: StandardEntries model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/distinct.py b/algoliasearch/search/models/distinct.py index fcc03353d..6f375f0b4 100644 --- a/algoliasearch/search/models/distinct.py +++ b/algoliasearch/search/models/distinct.py @@ -27,7 +27,7 @@ class Distinct(BaseModel): """ Whether deduplication is turned on. If true, only one member of a group is shown in the search results. """ oneof_schema_2_validator: Optional[int] = Field(default=None) """ Number of members of a group of records to include in the search results. 
- Don't use `distinct > 1` for records that might be [promoted by rules](https://www.algolia.com/doc/guides/managing-results/rules/merchandising-and-promoting/how-to/promote-hits/). The number of hits won't be correct and faceting won't work as expected. - With `distinct > 1`, the `hitsPerPage` parameter controls the number of returned groups. For example, with `hitsPerPage: 10` and `distinct: 2`, up to 20 records are returned. Likewise, the `nbHits` response attribute contains the number of returned groups. """ - actual_instance: Optional[Union[bool, int]] = None + actual_instance: Union[bool, int, None] = None one_of_schemas: Set[str] = {"bool", "int"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[bool, int]]: + def unwrap_actual_instance(self) -> Union[bool, int, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], bool, int]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/edit.py b/algoliasearch/search/models/edit.py index 7f0ee1a38..6f55e4989 100644 --- a/algoliasearch/search/models/edit.py +++ b/algoliasearch/search/models/edit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,16 +20,26 @@ from algoliasearch.search.models.edit_type import EditType +_ALIASES = { + "type": "type", + "delete": "delete", + "insert": "insert", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Edit(BaseModel): """ Edit """ - type: Optional[EditType] = Field(default=None, alias="type") - delete: Optional[str] = Field(default=None, alias="delete") + type: Optional[EditType] = None + delete: Optional[str] = None """ Text or patterns to remove from the query string. """ - insert: Optional[str] = Field(default=None, alias="insert") + insert: Optional[str] = None """ Text to be added in place of the deleted text inside the query string. 
""" model_config = ConfigDict( @@ -37,6 +47,7 @@ class Edit(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/error_base.py b/algoliasearch/search/models/error_base.py index 075d8a3ac..01e47a756 100644 --- a/algoliasearch/search/models/error_base.py +++ b/algoliasearch/search/models/error_base.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "message": "message", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ErrorBase(BaseModel): """ Error. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/search/models/exhaustive.py b/algoliasearch/search/models/exhaustive.py index 1b7e3b403..36b73d024 100644 --- a/algoliasearch/search/models/exhaustive.py +++ b/algoliasearch/search/models/exhaustive.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,20 +18,33 @@ from typing_extensions import Self +_ALIASES = { + "facets_count": "facetsCount", + "facet_values": "facetValues", + "nb_hits": "nbHits", + "rules_match": "rulesMatch", + "typo": "typo", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Exhaustive(BaseModel): """ Whether certain properties of the search 
response are calculated exhaustive (exact) or approximated. """ - facets_count: Optional[bool] = Field(default=None, alias="facetsCount") + facets_count: Optional[bool] = None """ Whether the facet count is exhaustive (`true`) or approximate (`false`). See the [related discussion](https://support.algolia.com/hc/en-us/articles/4406975248145-Why-are-my-facet-and-hit-counts-not-accurate-). """ - facet_values: Optional[bool] = Field(default=None, alias="facetValues") + facet_values: Optional[bool] = None """ The value is `false` if not all facet values are retrieved. """ - nb_hits: Optional[bool] = Field(default=None, alias="nbHits") + nb_hits: Optional[bool] = None """ Whether the `nbHits` is exhaustive (`true`) or approximate (`false`). When the query takes more than 50ms to be processed, the engine makes an approximation. This can happen when using complex filters on millions of records, when typo-tolerance was not exhaustive, or when enough hits have been retrieved (for example, after the engine finds 10,000 exact matches). `nbHits` is reported as non-exhaustive whenever an approximation is made, even if the approximation didn’t, in the end, impact the exhaustivity of the query. """ - rules_match: Optional[bool] = Field(default=None, alias="rulesMatch") + rules_match: Optional[bool] = None """ Rules matching exhaustivity. The value is `false` if rules were enable for this query, and could not be fully processed due a timeout. This is generally caused by the number of alternatives (such as typos) which is too large. """ - typo: Optional[bool] = Field(default=None, alias="typo") + typo: Optional[bool] = None """ Whether the typo search was exhaustive (`true`) or approximate (`false`). An approximation is done when the typo search query part takes more than 10% of the query budget (ie. 5ms by default) to be processed (this can happen when a lot of typo alternatives exist for the query). This field will not be included when typo-tolerance is entirely disabled. 
""" model_config = ConfigDict( @@ -39,6 +52,7 @@ class Exhaustive(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/facet_filters.py b/algoliasearch/search/models/facet_filters.py index 9669c5fb0..e31fc57d6 100644 --- a/algoliasearch/search/models/facet_filters.py +++ b/algoliasearch/search/models/facet_filters.py @@ -27,7 +27,7 @@ class FacetFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[FacetFilters], str]] = None + actual_instance: Union[List[FacetFilters], str, None] = None one_of_schemas: Set[str] = {"List[FacetFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[FacetFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[FacetFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[FacetFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/facet_hits.py b/algoliasearch/search/models/facet_hits.py index 3b3376e4e..e1b65e8c0 100644 --- a/algoliasearch/search/models/facet_hits.py +++ b/algoliasearch/search/models/facet_hits.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "value": "value", + "highlighted": "highlighted", + "count": "count", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class FacetHits(BaseModel): """ FacetHits """ - value: str = Field(alias="value") + value: str """ Facet value. """ - highlighted: str = Field(alias="highlighted") + highlighted: str """ Highlighted attribute value, including HTML tags. """ - count: int = Field(alias="count") + count: int """ Number of records with this facet value. [The count may be approximated](https://support.algolia.com/hc/en-us/articles/4406975248145-Why-are-my-facet-and-hit-counts-not-accurate-). 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class FacetHits(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/facet_ordering.py b/algoliasearch/search/models/facet_ordering.py index d91597f01..6f92c00d6 100644 --- a/algoliasearch/search/models/facet_ordering.py +++ b/algoliasearch/search/models/facet_ordering.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,14 +21,23 @@ from algoliasearch.search.models.facets import Facets from algoliasearch.search.models.value import Value +_ALIASES = { + "facets": "facets", + "values": "values", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class FacetOrdering(BaseModel): """ Order of facet names and facet values in your UI. """ - facets: Optional[Facets] = Field(default=None, alias="facets") - values: Optional[Dict[str, Value]] = Field(default=None, alias="values") + facets: Optional[Facets] = None + values: Optional[Dict[str, Value]] = None """ Order of facet values. One object for each facet. 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class FacetOrdering(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/facet_stats.py b/algoliasearch/search/models/facet_stats.py index ada778720..d59c834b9 100644 --- a/algoliasearch/search/models/facet_stats.py +++ b/algoliasearch/search/models/facet_stats.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "min": "min", + "max": "max", + "avg": "avg", + "sum": "sum", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class FacetStats(BaseModel): """ FacetStats """ - min: Optional[float] = Field(default=None, alias="min") + min: Optional[float] = None """ Minimum value in the results. """ - max: Optional[float] = Field(default=None, alias="max") + max: Optional[float] = None """ Maximum value in the results. """ - avg: Optional[float] = Field(default=None, alias="avg") + avg: Optional[float] = None """ Average facet value in the results. """ - sum: Optional[float] = Field(default=None, alias="sum") + sum: Optional[float] = None """ Sum of all values in the results. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class FacetStats(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/facets.py b/algoliasearch/search/models/facets.py index d840e71a4..0eab5aacf 100644 --- a/algoliasearch/search/models/facets.py +++ b/algoliasearch/search/models/facets.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "order": "order", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Facets(BaseModel): """ Order of facet names. """ - order: Optional[List[str]] = Field(default=None, alias="order") + order: Optional[List[str]] = None """ Explicit order of facets or facet values. This setting lets you always show specific facets or facet values at the top of the list. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class Facets(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/fetched_index.py b/algoliasearch/search/models/fetched_index.py index 6437feeae..200dd5002 100644 --- a/algoliasearch/search/models/fetched_index.py +++ b/algoliasearch/search/models/fetched_index.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,34 +18,54 @@ from typing_extensions import Self +_ALIASES = { + "name": "name", + "created_at": "createdAt", + "updated_at": "updatedAt", + "entries": "entries", + "data_size": "dataSize", + "file_size": "fileSize", + "last_build_time_s": "lastBuildTimeS", + "number_of_pending_tasks": "numberOfPendingTasks", + "pending_task": "pendingTask", + "primary": "primary", + "replicas": "replicas", + "virtual": "virtual", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class FetchedIndex(BaseModel): """ FetchedIndex """ - name: str = Field(alias="name") + name: str """ Index name. """ - created_at: str = Field(alias="createdAt") + created_at: str """ Index creation date. An empty string means that the index has no records. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. """ - entries: int = Field(alias="entries") + entries: int """ Number of records contained in the index. """ - data_size: int = Field(alias="dataSize") + data_size: int """ Number of bytes of the index in minified format. """ - file_size: int = Field(alias="fileSize") + file_size: int """ Number of bytes of the index binary file. 
""" - last_build_time_s: int = Field(alias="lastBuildTimeS") + last_build_time_s: int """ Last build time. """ - number_of_pending_tasks: int = Field(alias="numberOfPendingTasks") + number_of_pending_tasks: int """ Number of pending indexing operations. This value is deprecated and should not be used. """ - pending_task: bool = Field(alias="pendingTask") + pending_task: bool """ A boolean which says whether the index has pending tasks. This value is deprecated and should not be used. """ - primary: Optional[str] = Field(default=None, alias="primary") + primary: Optional[str] = None """ Only present if the index is a replica. Contains the name of the related primary index. """ - replicas: Optional[List[str]] = Field(default=None, alias="replicas") + replicas: Optional[List[str]] = None """ Only present if the index is a primary index with replicas. Contains the names of all linked replicas. """ - virtual: Optional[bool] = Field(default=None, alias="virtual") + virtual: Optional[bool] = None """ Only present if the index is a [virtual replica](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-an-index-alphabetically/#virtual-replicas). 
""" model_config = ConfigDict( @@ -53,6 +73,7 @@ class FetchedIndex(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_api_key_response.py b/algoliasearch/search/models/get_api_key_response.py index 718e28e16..abe4d753d 100644 --- a/algoliasearch/search/models/get_api_key_response.py +++ b/algoliasearch/search/models/get_api_key_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,33 +20,48 @@ from algoliasearch.search.models.acl import Acl +_ALIASES = { + "value": "value", + "created_at": "createdAt", + "acl": "acl", + "description": "description", + "indexes": "indexes", + "max_hits_per_query": "maxHitsPerQuery", + "max_queries_per_ip_per_hour": "maxQueriesPerIPPerHour", + "query_parameters": "queryParameters", + "referers": "referers", + "validity": "validity", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetApiKeyResponse(BaseModel): """ GetApiKeyResponse """ - value: Optional[str] = Field(default=None, alias="value") + value: Optional[str] = None """ API key. """ - created_at: int = Field(alias="createdAt") + created_at: int """ Timestamp when the object was created, in milliseconds since the Unix epoch. """ - acl: List[Acl] = Field(alias="acl") + acl: List[Acl] """ Permissions that determine the type of API requests this key can make. The required ACL is listed in each endpoint's reference. For more information, see [access control list](https://www.algolia.com/doc/guides/security/api-keys/#access-control-list-acl). 
""" - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ Description of an API key to help you identify this API key. """ - indexes: Optional[List[str]] = Field(default=None, alias="indexes") + indexes: Optional[List[str]] = None """ Index names or patterns that this API key can access. By default, an API key can access all indices in the same application. You can use leading and trailing wildcard characters (`*`): - `dev_*` matches all indices starting with \"dev_\". - `*_dev` matches all indices ending with \"_dev\". - `*_products_*` matches all indices containing \"_products_\". """ - max_hits_per_query: Optional[int] = Field(default=None, alias="maxHitsPerQuery") + max_hits_per_query: Optional[int] = None """ Maximum number of results this API key can retrieve in one query. By default, there's no limit. """ - max_queries_per_ip_per_hour: Optional[int] = Field( - default=None, alias="maxQueriesPerIPPerHour" - ) + max_queries_per_ip_per_hour: Optional[int] = None """ Maximum number of API requests allowed per IP address or [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/) per hour. If this limit is reached, the API returns an error with status code `429`. By default, there's no limit. """ - query_parameters: Optional[str] = Field(default=None, alias="queryParameters") + query_parameters: Optional[str] = None """ Query parameters to add when making API requests with this API key. To restrict this API key to specific IP addresses, add the `restrictSources` parameter. You can only add a single source, but you can provide a range of IP addresses. Creating an API key fails if the request is made from an IP address that's outside the restricted range. """ - referers: Optional[List[str]] = Field(default=None, alias="referers") + referers: Optional[List[str]] = None """ Allowed HTTP referrers for this API key. By default, all referrers are allowed. 
You can use leading and trailing wildcard characters (`*`): - `https://algolia.com/*` allows all referrers starting with \"https://algolia.com/\" - `*.algolia.com` allows all referrers ending with \".algolia.com\" - `*algolia.com*` allows all referrers in the domain \"algolia.com\". Like all HTTP headers, referrers can be spoofed. Don't rely on them to secure your data. For more information, see [HTTP referrer restrictions](https://www.algolia.com/doc/guides/security/security-best-practices/#http-referrers-restrictions). """ - validity: Optional[int] = Field(default=None, alias="validity") + validity: Optional[int] = None """ Duration (in seconds) after which the API key expires. By default, API keys don't expire. """ model_config = ConfigDict( @@ -54,6 +69,7 @@ class GetApiKeyResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_dictionary_settings_response.py b/algoliasearch/search/models/get_dictionary_settings_response.py index be6e9eda6..bf33d3350 100644 --- a/algoliasearch/search/models/get_dictionary_settings_response.py +++ b/algoliasearch/search/models/get_dictionary_settings_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.standard_entries import StandardEntries +_ALIASES = { + "disable_standard_entries": "disableStandardEntries", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetDictionarySettingsResponse(BaseModel): """ GetDictionarySettingsResponse """ - disable_standard_entries: StandardEntries = Field(alias="disableStandardEntries") + disable_standard_entries: StandardEntries model_config = ConfigDict( 
use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_logs_response.py b/algoliasearch/search/models/get_logs_response.py index 5be352272..7c7e84857 100644 --- a/algoliasearch/search/models/get_logs_response.py +++ b/algoliasearch/search/models/get_logs_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.log import Log +_ALIASES = { + "logs": "logs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetLogsResponse(BaseModel): """ GetLogsResponse """ - logs: List[Log] = Field(alias="logs") + logs: List[Log] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_objects_params.py b/algoliasearch/search/models/get_objects_params.py index ad5fffd29..d16e3f06c 100644 --- a/algoliasearch/search/models/get_objects_params.py +++ b/algoliasearch/search/models/get_objects_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.get_objects_request import GetObjectsRequest +_ALIASES = { + "requests": "requests", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetObjectsParams(BaseModel): """ Request parameters. 
""" - requests: List[GetObjectsRequest] = Field(alias="requests") + requests: List[GetObjectsRequest] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_objects_request.py b/algoliasearch/search/models/get_objects_request.py index a29204355..8634bca9d 100644 --- a/algoliasearch/search/models/get_objects_request.py +++ b/algoliasearch/search/models/get_objects_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "attributes_to_retrieve": "attributesToRetrieve", + "object_id": "objectID", + "index_name": "indexName", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class GetObjectsRequest(BaseModel): """ Request body for retrieving records. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to retrieve. If not specified, all retrievable attributes are returned. """ - object_id: str = Field(alias="objectID") + object_id: str """ Object ID for the record to retrieve. """ - index_name: str = Field(alias="indexName") + index_name: str """ Index from which to retrieve the records. 
""" model_config = ConfigDict( @@ -37,6 +46,7 @@ class GetObjectsRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_objects_response.py b/algoliasearch/search/models/get_objects_response.py index 8b1af4379..b2819c51a 100644 --- a/algoliasearch/search/models/get_objects_response.py +++ b/algoliasearch/search/models/get_objects_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "results": "results", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class GetObjectsResponse(BaseModel): """ GetObjectsResponse """ - results: List[object] = Field(alias="results") + results: List[object] """ Retrieved records. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class GetObjectsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_task_response.py b/algoliasearch/search/models/get_task_response.py index 74dee9eec..e8aa9c3df 100644 --- a/algoliasearch/search/models/get_task_response.py +++ b/algoliasearch/search/models/get_task_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.task_status import TaskStatus +_ALIASES = { + "status": "status", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTaskResponse(BaseModel): """ GetTaskResponse """ - status: TaskStatus = Field(alias="status") + status: TaskStatus model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/get_top_user_ids_response.py b/algoliasearch/search/models/get_top_user_ids_response.py index 99938960d..53d17228b 100644 --- a/algoliasearch/search/models/get_top_user_ids_response.py +++ b/algoliasearch/search/models/get_top_user_ids_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.search.models.user_id import UserId +_ALIASES = { + "top_users": "topUsers", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class GetTopUserIdsResponse(BaseModel): """ 
User IDs and clusters. """ - top_users: List[Dict[str, List[UserId]]] = Field(alias="topUsers") + top_users: List[Dict[str, List[UserId]]] """ Key-value pairs with cluster names as keys and lists of users with the highest number of records per cluster as values. """ model_config = ConfigDict( @@ -34,6 +42,7 @@ class GetTopUserIdsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/has_pending_mappings_response.py b/algoliasearch/search/models/has_pending_mappings_response.py index 6720e0577..232695f3c 100644 --- a/algoliasearch/search/models/has_pending_mappings_response.py +++ b/algoliasearch/search/models/has_pending_mappings_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "pending": "pending", + "clusters": "clusters", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class HasPendingMappingsResponse(BaseModel): """ HasPendingMappingsResponse """ - pending: bool = Field(alias="pending") + pending: bool """ Whether there are clusters undergoing migration, creation, or deletion. """ - clusters: Optional[Dict[str, List[str]]] = Field(default=None, alias="clusters") + clusters: Optional[Dict[str, List[str]]] = None """ Cluster pending mapping state: migrating, creating, deleting. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class HasPendingMappingsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/highlight_result.py b/algoliasearch/search/models/highlight_result.py index a41cb3e20..bc9faff6e 100644 --- a/algoliasearch/search/models/highlight_result.py +++ b/algoliasearch/search/models/highlight_result.py @@ -32,8 +32,8 @@ class HighlightResult(BaseModel): """ Surround words that match the query with HTML tags for highlighting. """ oneof_schema_3_validator: Optional[List[HighlightResult]] = Field(default=None) """ Surround words that match the query with HTML tags for highlighting. """ - actual_instance: Optional[ - Union[Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult]] + actual_instance: Union[ + Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult], None ] = None one_of_schemas: Set[str] = { "Dict[str, HighlightResult]", @@ -51,15 +51,19 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[Dict[str, HighlightResult], HighlightResultOption, List[HighlightResult]] + ) -> Union[ + Dict[str, HighlightResult], + HighlightResultOption, + List[HighlightResult], + Self, + None, ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -109,9 +113,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -130,8 +134,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/highlight_result_option.py b/algoliasearch/search/models/highlight_result_option.py index d36432b4f..8b312ba2c 100644 --- a/algoliasearch/search/models/highlight_result_option.py +++ b/algoliasearch/search/models/highlight_result_option.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.search.models.match_level import MatchLevel +_ALIASES = { + "value": "value", + "match_level": "matchLevel", + "matched_words": "matchedWords", + "fully_highlighted": "fullyHighlighted", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class HighlightResultOption(BaseModel): """ Surround words that match the query with HTML tags for highlighting. """ - value: str = Field(alias="value") + value: str """ Highlighted attribute value, including HTML tags. """ - match_level: MatchLevel = Field(alias="matchLevel") - matched_words: List[str] = Field(alias="matchedWords") + match_level: MatchLevel + matched_words: List[str] """ List of matched words from the search query. 
""" - fully_highlighted: Optional[bool] = Field(default=None, alias="fullyHighlighted") + fully_highlighted: Optional[bool] = None """ Whether the entire attribute value is highlighted. """ model_config = ConfigDict( @@ -39,6 +50,7 @@ class HighlightResultOption(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/hit.py b/algoliasearch/search/models/hit.py index d2e725e56..ebafd90bb 100644 --- a/algoliasearch/search/models/hit.py +++ b/algoliasearch/search/models/hit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,30 +22,39 @@ from algoliasearch.search.models.ranking_info import RankingInfo from algoliasearch.search.models.snippet_result import SnippetResult +_ALIASES = { + "object_id": "objectID", + "highlight_result": "_highlightResult", + "snippet_result": "_snippetResult", + "ranking_info": "_rankingInfo", + "distinct_seq_id": "_distinctSeqID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Hit(BaseModel): """ Search result. A hit is a record from your index, augmented with special attributes for highlighting, snippeting, and ranking. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique record identifier. """ - highlight_result: Optional[Dict[str, HighlightResult]] = Field( - default=None, alias="_highlightResult" - ) + highlight_result: Optional[Dict[str, HighlightResult]] = None """ Surround words that match the query with HTML tags for highlighting. 
""" - snippet_result: Optional[Dict[str, SnippetResult]] = Field( - default=None, alias="_snippetResult" - ) + snippet_result: Optional[Dict[str, SnippetResult]] = None """ Snippets that show the context around a matching search query. """ - ranking_info: Optional[RankingInfo] = Field(default=None, alias="_rankingInfo") - distinct_seq_id: Optional[int] = Field(default=None, alias="_distinctSeqID") + ranking_info: Optional[RankingInfo] = None + distinct_seq_id: Optional[int] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/search/models/ignore_plurals.py b/algoliasearch/search/models/ignore_plurals.py index 3076c80ea..4ac617988 100644 --- a/algoliasearch/search/models/ignore_plurals.py +++ b/algoliasearch/search/models/ignore_plurals.py @@ -33,9 +33,7 @@ class IgnorePlurals(BaseModel): oneof_schema_3_validator: Optional[bool] = Field(default=None) """ If true, `ignorePlurals` is active for all languages included in `queryLanguages`, or for all supported languages, if `queryLanguges` is empty. If false, singulars, plurals, and other declensions won't be considered equivalent. """ - actual_instance: Optional[Union[BooleanString, List[SupportedLanguage], bool]] = ( - None - ) + actual_instance: Union[BooleanString, List[SupportedLanguage], bool, None] = None one_of_schemas: Set[str] = {"BooleanString", "List[SupportedLanguage]", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -48,14 +46,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." 
) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[BooleanString, List[SupportedLanguage], bool]]: + ) -> Union[BooleanString, List[SupportedLanguage], bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -104,9 +102,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -118,8 +116,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/index_settings.py b/algoliasearch/search/models/index_settings.py index 777956056..cd7dfb310 100644 --- a/algoliasearch/search/models/index_settings.py +++ b/algoliasearch/search/models/index_settings.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -37,205 +37,198 @@ from algoliasearch.search.models.supported_language import SupportedLanguage from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "attributes_for_faceting": "attributesForFaceting", + "replicas": "replicas", + "pagination_limited_to": "paginationLimitedTo", + "unretrievable_attributes": "unretrievableAttributes", + 
"disable_typo_tolerance_on_words": "disableTypoToleranceOnWords", + "attributes_to_transliterate": "attributesToTransliterate", + "camel_case_attributes": "camelCaseAttributes", + "decompounded_attributes": "decompoundedAttributes", + "index_languages": "indexLanguages", + "disable_prefix_on_attributes": "disablePrefixOnAttributes", + "allow_compression_of_integer_array": "allowCompressionOfIntegerArray", + "numeric_attributes_for_filtering": "numericAttributesForFiltering", + "separators_to_index": "separatorsToIndex", + "searchable_attributes": "searchableAttributes", + "user_data": "userData", + "custom_normalization": "customNormalization", + "attribute_for_distinct": "attributeForDistinct", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + 
"optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class IndexSettings(BaseModel): """ Index settings. """ - attributes_for_faceting: Optional[List[str]] = Field( - default=None, alias="attributesForFaceting" - ) + attributes_for_faceting: Optional[List[str]] = None """ Attributes used for [faceting](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/). Facets are attributes that let you categorize search results. They can be used for filtering search results. By default, no attribute is used for faceting. Attribute names are case-sensitive. **Modifiers** - `filterOnly(\"ATTRIBUTE\")`. Allows using this attribute as a filter, but doesn't evalue the facet values. - `searchable(\"ATTRIBUTE\")`. Allows searching for facet values. - `afterDistinct(\"ATTRIBUTE\")`. Evaluates the facet count _after_ deduplication with `distinct`. This ensures accurate facet counts. You can apply this modifier to searchable facets: `afterDistinct(searchable(ATTRIBUTE))`. 
""" - replicas: Optional[List[str]] = Field(default=None, alias="replicas") + replicas: Optional[List[str]] = None """ Creates [replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/). Replicas are copies of a primary index with the same records but different settings, synonyms, or rules. If you want to offer a different ranking or sorting of your search results, you'll use replica indices. All index operations on a primary index are automatically forwarded to its replicas. To add a replica index, you must provide the complete set of replicas to this parameter. If you omit a replica from this list, the replica turns into a regular, standalone index that will no longer by synced with the primary index. **Modifier** - `virtual(\"REPLICA\")`. Create a virtual replica, Virtual replicas don't increase the number of records and are optimized for [Relevant sorting](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/relevant-sort/). """ - pagination_limited_to: Optional[int] = Field( - default=None, alias="paginationLimitedTo" - ) + pagination_limited_to: Optional[int] = None """ Maximum number of search results that can be obtained through pagination. Higher pagination limits might slow down your search. For pagination limits above 1,000, the sorting of results beyond the 1,000th hit can't be guaranteed. """ - unretrievable_attributes: Optional[List[str]] = Field( - default=None, alias="unretrievableAttributes" - ) + unretrievable_attributes: Optional[List[str]] = None """ Attributes that can't be retrieved at query time. This can be useful if you want to use an attribute for ranking or to [restrict access](https://www.algolia.com/doc/guides/security/api-keys/how-to/user-restricted-access-to-data/), but don't want to include it in the search results. Attribute names are case-sensitive. 
""" - disable_typo_tolerance_on_words: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnWords" - ) + disable_typo_tolerance_on_words: Optional[List[str]] = None """ Words for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). This also turns off [word splitting and concatenation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/splitting-and-concatenation/) for the specified words. """ - attributes_to_transliterate: Optional[List[str]] = Field( - default=None, alias="attributesToTransliterate" - ) + attributes_to_transliterate: Optional[List[str]] = None """ Attributes, for which you want to support [Japanese transliteration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#japanese-transliteration-and-type-ahead). Transliteration supports searching in any of the Japanese writing systems. To support transliteration, you must set the indexing language to Japanese. Attribute names are case-sensitive. """ - camel_case_attributes: Optional[List[str]] = Field( - default=None, alias="camelCaseAttributes" - ) + camel_case_attributes: Optional[List[str]] = None """ Attributes for which to split [camel case](https://wikipedia.org/wiki/Camel_case) words. Attribute names are case-sensitive. """ - decompounded_attributes: Optional[object] = Field( - default=None, alias="decompoundedAttributes" - ) + decompounded_attributes: Optional[object] = None """ Searchable attributes to which Algolia should apply [word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/how-to/customize-segmentation/) (decompounding). Attribute names are case-sensitive. 
Compound words are formed by combining two or more individual words, and are particularly prevalent in Germanic languages—for example, \"firefighter\". With decompounding, the individual components are indexed separately. You can specify different lists for different languages. Decompounding is supported for these languages: Dutch (`nl`), German (`de`), Finnish (`fi`), Danish (`da`), Swedish (`sv`), and Norwegian (`no`). Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - index_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="indexLanguages" - ) + index_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific processing steps, such as word detection and dictionary settings. **You should always specify an indexing language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - disable_prefix_on_attributes: Optional[List[str]] = Field( - default=None, alias="disablePrefixOnAttributes" - ) + disable_prefix_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to turn off [prefix matching](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/#adjusting-prefix-search). Attribute names are case-sensitive. 
""" - allow_compression_of_integer_array: Optional[bool] = Field( - default=None, alias="allowCompressionOfIntegerArray" - ) + allow_compression_of_integer_array: Optional[bool] = None """ Whether arrays with exclusively non-negative integers should be compressed for better performance. If true, the compressed arrays may be reordered. """ - numeric_attributes_for_filtering: Optional[List[str]] = Field( - default=None, alias="numericAttributesForFiltering" - ) + numeric_attributes_for_filtering: Optional[List[str]] = None """ Numeric attributes that can be used as [numerical filters](https://www.algolia.com/doc/guides/managing-results/rules/detecting-intent/how-to/applying-a-custom-filter-for-a-specific-query/#numerical-filters). Attribute names are case-sensitive. By default, all numeric attributes are available as numerical filters. For faster indexing, reduce the number of numeric attributes. If you want to turn off filtering for all numeric attributes, specifiy an attribute that doesn't exist in your index, such as `NO_NUMERIC_FILTERING`. **Modifier** - `equalOnly(\"ATTRIBUTE\")`. Support only filtering based on equality comparisons `=` and `!=`. """ - separators_to_index: Optional[str] = Field(default=None, alias="separatorsToIndex") + separators_to_index: Optional[str] = None """ Controls which separators are indexed. Separators are all non-letter characters except spaces and currency characters, such as $€£¥. By default, separator characters aren't indexed. With `separatorsToIndex`, Algolia treats separator characters as separate words. For example, a search for `C#` would report two matches. """ - searchable_attributes: Optional[List[str]] = Field( - default=None, alias="searchableAttributes" - ) + searchable_attributes: Optional[List[str]] = None """ Attributes used for searching. Attribute names are case-sensitive. 
By default, all attributes are searchable and the [Attribute](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#attribute) ranking criterion is turned off. With a non-empty list, Algolia only returns results with matches in the selected attributes. In addition, the Attribute ranking criterion is turned on: matches in attributes that are higher in the list of `searchableAttributes` rank first. To make matches in two attributes rank equally, include them in a comma-separated string, such as `\"title,alternate_title\"`. Attributes with the same priority are always unordered. For more information, see [Searchable attributes](https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/how-to/setting-searchable-attributes/). **Modifier** - `unordered(\"ATTRIBUTE\")`. Ignore the position of a match within the attribute. Without modifier, matches at the beginning of an attribute rank higer than matches at the end. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - custom_normalization: Optional[Dict[str, Dict[str, str]]] = Field( - default=None, alias="customNormalization" - ) + custom_normalization: Optional[Dict[str, Dict[str, str]]] = None """ Characters and their normalized replacements. This overrides Algolia's default [normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/). """ - attribute_for_distinct: Optional[str] = Field( - default=None, alias="attributeForDistinct" - ) + attribute_for_distinct: Optional[str] = None """ Attribute that should be used to establish groups of results. Attribute names are case-sensitive. All records with the same value for this attribute are considered a group. 
You can combine `attributeForDistinct` with the `distinct` search parameter to control how many items per group are included in the search results. If you want to use the same attribute also for faceting, use the `afterDistinct` modifier of the `attributesForFaceting` setting. This applies faceting _after_ deduplication, which will result in accurate facet counts. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. 
Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. 
Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. 
""" - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. 
""" - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. """ - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. 
This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. 
""" - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. """ - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. 
For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. - If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. 
""" - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. 
For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. 
""" - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). """ - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/languages.py b/algoliasearch/search/models/languages.py index 33fd6f26b..ae3990201 100644 --- a/algoliasearch/search/models/languages.py +++ b/algoliasearch/search/models/languages.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,32 @@ from algoliasearch.search.models.dictionary_language import DictionaryLanguage +_ALIASES = { + "plurals": "plurals", + "stopwords": "stopwords", + "compounds": "compounds", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Languages(BaseModel): """ Dictionary language. 
""" - plurals: DictionaryLanguage = Field(alias="plurals") - stopwords: DictionaryLanguage = Field(alias="stopwords") - compounds: DictionaryLanguage = Field(alias="compounds") + plurals: DictionaryLanguage + stopwords: DictionaryLanguage + compounds: DictionaryLanguage model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/list_api_keys_response.py b/algoliasearch/search/models/list_api_keys_response.py index 49066b327..ccee90ddc 100644 --- a/algoliasearch/search/models/list_api_keys_response.py +++ b/algoliasearch/search/models/list_api_keys_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.search.models.get_api_key_response import GetApiKeyResponse +_ALIASES = { + "keys": "keys", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListApiKeysResponse(BaseModel): """ ListApiKeysResponse """ - keys: List[GetApiKeyResponse] = Field(alias="keys") + keys: List[GetApiKeyResponse] """ API keys. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class ListApiKeysResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/list_clusters_response.py b/algoliasearch/search/models/list_clusters_response.py index 98655ca43..90b562960 100644 --- a/algoliasearch/search/models/list_clusters_response.py +++ b/algoliasearch/search/models/list_clusters_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "top_users": "topUsers", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ListClustersResponse(BaseModel): """ Clusters. """ - top_users: List[str] = Field(alias="topUsers") + top_users: List[str] """ Key-value pairs with cluster names as keys and lists of users with the highest number of records per cluster as values. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class ListClustersResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/list_indices_response.py b/algoliasearch/search/models/list_indices_response.py index 2c504d6a1..1a9aa9fac 100644 --- a/algoliasearch/search/models/list_indices_response.py +++ b/algoliasearch/search/models/list_indices_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.search.models.fetched_index import FetchedIndex +_ALIASES = { + "items": "items", + "nb_pages": "nbPages", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListIndicesResponse(BaseModel): """ ListIndicesResponse """ - items: List[FetchedIndex] = Field(alias="items") + items: List[FetchedIndex] """ All indices in your Algolia application. """ - nb_pages: Optional[int] = Field(default=None, alias="nbPages") + nb_pages: Optional[int] = None """ Number of pages. 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class ListIndicesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/list_user_ids_response.py b/algoliasearch/search/models/list_user_ids_response.py index 7f25d4c7d..91e8f107e 100644 --- a/algoliasearch/search/models/list_user_ids_response.py +++ b/algoliasearch/search/models/list_user_ids_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,13 +20,21 @@ from algoliasearch.search.models.user_id import UserId +_ALIASES = { + "user_ids": "userIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ListUserIdsResponse(BaseModel): """ User ID data. """ - user_ids: List[UserId] = Field(alias="userIDs") + user_ids: List[UserId] """ User IDs. 
""" model_config = ConfigDict( @@ -34,6 +42,7 @@ class ListUserIdsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/log.py b/algoliasearch/search/models/log.py index 90f05ef3d..05909ac29 100644 --- a/algoliasearch/search/models/log.py +++ b/algoliasearch/search/models/log.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,41 +20,63 @@ from algoliasearch.search.models.log_query import LogQuery +_ALIASES = { + "timestamp": "timestamp", + "method": "method", + "answer_code": "answer_code", + "query_body": "query_body", + "answer": "answer", + "url": "url", + "ip": "ip", + "query_headers": "query_headers", + "sha1": "sha1", + "nb_api_calls": "nb_api_calls", + "processing_time_ms": "processing_time_ms", + "index": "index", + "var_query_params": "query_params", + "query_nb_hits": "query_nb_hits", + "inner_queries": "inner_queries", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Log(BaseModel): """ Log """ - timestamp: str = Field(alias="timestamp") + timestamp: str """ Date and time of the API request, in RFC 3339 format. """ - method: str = Field(alias="method") + method: str """ HTTP method of the request. """ - answer_code: str = Field(alias="answer_code") + answer_code: str """ HTTP status code of the response. """ - query_body: str = Field(alias="query_body") + query_body: str """ Request body. """ - answer: str = Field(alias="answer") + answer: str """ Response body. """ - url: str = Field(alias="url") + url: str """ URL of the API endpoint. """ - ip: str = Field(alias="ip") + ip: str """ IP address of the client that performed the request. 
""" - query_headers: str = Field(alias="query_headers") + query_headers: str """ Request headers (API keys are obfuscated). """ - sha1: str = Field(alias="sha1") + sha1: str """ SHA1 signature of the log entry. """ - nb_api_calls: str = Field(alias="nb_api_calls") + nb_api_calls: str """ Number of API requests. """ - processing_time_ms: str = Field(alias="processing_time_ms") + processing_time_ms: str """ Processing time for the query in milliseconds. This doesn't include latency due to the network. """ - index: Optional[str] = Field(default=None, alias="index") + index: Optional[str] = None """ Index targeted by the query. """ - var_query_params: Optional[str] = Field(default=None, alias="query_params") + var_query_params: Optional[str] = None """ Query parameters sent with the request. """ - query_nb_hits: Optional[str] = Field(default=None, alias="query_nb_hits") + query_nb_hits: Optional[str] = None """ Number of search results (hits) returned for the query. """ - inner_queries: Optional[List[LogQuery]] = Field(default=None, alias="inner_queries") + inner_queries: Optional[List[LogQuery]] = None """ Queries performed for the given request. 
""" model_config = ConfigDict( @@ -62,6 +84,7 @@ class Log(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/log_query.py b/algoliasearch/search/models/log_query.py index 47ba27001..8b764c45d 100644 --- a/algoliasearch/search/models/log_query.py +++ b/algoliasearch/search/models/log_query.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "index_name": "index_name", + "user_token": "user_token", + "query_id": "query_id", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class LogQuery(BaseModel): """ LogQuery """ - index_name: Optional[str] = Field(default=None, alias="index_name") + index_name: Optional[str] = None """ Index targeted by the query. """ - user_token: Optional[str] = Field(default=None, alias="user_token") + user_token: Optional[str] = None """ A user identifier. """ - query_id: Optional[str] = Field(default=None, alias="query_id") + query_id: Optional[str] = None """ Unique query identifier. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class LogQuery(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/matched_geo_location.py b/algoliasearch/search/models/matched_geo_location.py index b5de4a639..5384403b5 100644 --- a/algoliasearch/search/models/matched_geo_location.py +++ b/algoliasearch/search/models/matched_geo_location.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "lat": "lat", + "lng": "lng", + "distance": "distance", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class MatchedGeoLocation(BaseModel): """ MatchedGeoLocation """ - lat: Optional[float] = Field(default=None, alias="lat") + lat: Optional[float] = None """ Latitude of the matched location. """ - lng: Optional[float] = Field(default=None, alias="lng") + lng: Optional[float] = None """ Longitude of the matched location. """ - distance: Optional[int] = Field(default=None, alias="distance") + distance: Optional[int] = None """ Distance between the matched location and the search location (in meters). 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class MatchedGeoLocation(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/multiple_batch_request.py b/algoliasearch/search/models/multiple_batch_request.py index 758af41ad..daf5dee30 100644 --- a/algoliasearch/search/models/multiple_batch_request.py +++ b/algoliasearch/search/models/multiple_batch_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,16 +20,26 @@ from algoliasearch.search.models.action import Action +_ALIASES = { + "action": "action", + "body": "body", + "index_name": "indexName", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class MultipleBatchRequest(BaseModel): """ MultipleBatchRequest """ - action: Action = Field(alias="action") - body: Optional[object] = Field(default=None, alias="body") + action: Action + body: Optional[object] = None """ Operation arguments (varies with specified `action`). """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). 
""" model_config = ConfigDict( @@ -37,6 +47,7 @@ class MultipleBatchRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/multiple_batch_response.py b/algoliasearch/search/models/multiple_batch_response.py index 758db396f..ff1a3bd25 100644 --- a/algoliasearch/search/models/multiple_batch_response.py +++ b/algoliasearch/search/models/multiple_batch_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "object_ids": "objectIDs", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class MultipleBatchResponse(BaseModel): """ MultipleBatchResponse """ - task_id: Dict[str, int] = Field(alias="taskID") + task_id: Dict[str, int] """ Task IDs. One for each index. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Unique record identifiers. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class MultipleBatchResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/numeric_filters.py b/algoliasearch/search/models/numeric_filters.py index bd805e7e8..417b7f4d3 100644 --- a/algoliasearch/search/models/numeric_filters.py +++ b/algoliasearch/search/models/numeric_filters.py @@ -27,7 +27,7 @@ class NumericFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[NumericFilters], str]] = None + actual_instance: Union[List[NumericFilters], str, None] = None one_of_schemas: Set[str] = {"List[NumericFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[NumericFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[NumericFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[NumericFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/operation_index_params.py b/algoliasearch/search/models/operation_index_params.py index cb6cbb3c5..48e0e0d09 100644 --- a/algoliasearch/search/models/operation_index_params.py +++ b/algoliasearch/search/models/operation_index_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,16 +21,26 @@ from algoliasearch.search.models.operation_type import OperationType from algoliasearch.search.models.scope_type import ScopeType +_ALIASES = { + "operation": "operation", + "destination": "destination", + "scope": "scope", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class OperationIndexParams(BaseModel): """ OperationIndexParams """ - operation: OperationType = Field(alias="operation") - destination: str = Field(alias="destination") + operation: OperationType + destination: str """ Index name (case-sensitive). 
""" - scope: Optional[List[ScopeType]] = Field(default=None, alias="scope") + scope: Optional[List[ScopeType]] = None """ **Only for copying.** If you specify a scope, only the selected scopes are copied. Records and the other scopes are left unchanged. If you omit the `scope` parameter, everything is copied: records, settings, synonyms, and rules. """ model_config = ConfigDict( @@ -38,6 +48,7 @@ class OperationIndexParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/optional_filters.py b/algoliasearch/search/models/optional_filters.py index 08edec6fb..30f02effb 100644 --- a/algoliasearch/search/models/optional_filters.py +++ b/algoliasearch/search/models/optional_filters.py @@ -27,7 +27,7 @@ class OptionalFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[OptionalFilters], str]] = None + actual_instance: Union[List[OptionalFilters], str, None] = None one_of_schemas: Set[str] = {"List[OptionalFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[OptionalFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[OptionalFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[OptionalFilters], str]] return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/personalization.py b/algoliasearch/search/models/personalization.py index a6dcf022c..b04703d41 100644 --- a/algoliasearch/search/models/personalization.py +++ b/algoliasearch/search/models/personalization.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "filters_score": "filtersScore", + "ranking_score": "rankingScore", + "score": "score", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Personalization(BaseModel): """ Personalization """ - filters_score: Optional[int] = Field(default=None, alias="filtersScore") + filters_score: Optional[int] = None """ The score of the filters. """ - ranking_score: Optional[int] = Field(default=None, alias="rankingScore") + ranking_score: Optional[int] = None """ The score of the ranking. """ - score: Optional[int] = Field(default=None, alias="score") + score: Optional[int] = None """ The score of the event. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class Personalization(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/promote.py b/algoliasearch/search/models/promote.py index aaa1c0c10..6736ab329 100644 --- a/algoliasearch/search/models/promote.py +++ b/algoliasearch/search/models/promote.py @@ -31,7 +31,7 @@ class Promote(BaseModel): oneof_schema_2_validator: Optional[PromoteObjectID] = Field(default=None) - actual_instance: Optional[Union[PromoteObjectID, PromoteObjectIDs]] = None + actual_instance: Union[PromoteObjectID, PromoteObjectIDs, None] = None one_of_schemas: Set[str] = {"PromoteObjectID", "PromoteObjectIDs"} def __init__(self, *args, **kwargs) -> None: @@ -44,14 +44,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[PromoteObjectID, PromoteObjectIDs]]: + ) -> Union[PromoteObjectID, PromoteObjectIDs, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -92,9 +92,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -106,8 +106,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/promote_object_id.py b/algoliasearch/search/models/promote_object_id.py index 9652e16b6..04daf1cd0 100644 --- a/algoliasearch/search/models/promote_object_id.py +++ b/algoliasearch/search/models/promote_object_id.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", + "position": "position", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class PromoteObjectID(BaseModel): """ Record to promote. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique record identifier. """ - position: int = Field(alias="position") + position: int """ Position in the search results where you want to show the promoted records. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class PromoteObjectID(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/promote_object_ids.py b/algoliasearch/search/models/promote_object_ids.py index d19e755b1..5c68bbe00 100644 --- a/algoliasearch/search/models/promote_object_ids.py +++ b/algoliasearch/search/models/promote_object_ids.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "object_ids": "objectIDs", + "position": "position", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class PromoteObjectIDs(BaseModel): """ Records to promote. """ - object_ids: List[str] = Field(alias="objectIDs") + object_ids: List[str] """ Object IDs of the records you want to promote. The records are placed as a group at the `position`. For example, if you want to promote four records to position `0`, they will be the first four search results. """ - position: int = Field(alias="position") + position: int """ Position in the search results where you want to show the promoted records. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class PromoteObjectIDs(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/range.py b/algoliasearch/search/models/range.py index 4cda204fa..afbbc148b 100644 --- a/algoliasearch/search/models/range.py +++ b/algoliasearch/search/models/range.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_from": "from", + "value": "value", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Range(BaseModel): """ Range object with lower and upper values in meters to define custom ranges. """ - var_from: Optional[int] = Field(default=None, alias="from") + var_from: Optional[int] = None """ Lower boundary of a range in meters. The Geo ranking criterion considers all records within the range to be equal. """ - value: Optional[int] = Field(default=None, alias="value") + value: Optional[int] = None """ Upper boundary of a range in meters. The Geo ranking criterion considers all records within the range to be equal. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class Range(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/ranking_info.py b/algoliasearch/search/models/ranking_info.py index fd71e5264..b627bc2ff 100644 --- a/algoliasearch/search/models/ranking_info.py +++ b/algoliasearch/search/models/ranking_info.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,41 +21,55 @@ from algoliasearch.search.models.matched_geo_location import MatchedGeoLocation from algoliasearch.search.models.personalization import Personalization +_ALIASES = { + "filters": "filters", + "first_matched_word": "firstMatchedWord", + "geo_distance": "geoDistance", + "geo_precision": "geoPrecision", + "matched_geo_location": "matchedGeoLocation", + "personalization": "personalization", + "nb_exact_words": "nbExactWords", + "nb_typos": "nbTypos", + "promoted": "promoted", + "proximity_distance": "proximityDistance", + "user_score": "userScore", + "words": "words", + "promoted_by_re_ranking": "promotedByReRanking", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RankingInfo(BaseModel): """ Object with detailed information about the record's ranking. """ - filters: Optional[int] = Field(default=None, alias="filters") + filters: Optional[int] = None """ Whether a filter matched the query. """ - first_matched_word: int = Field(alias="firstMatchedWord") + first_matched_word: int """ Position of the first matched word in the best matching attribute of the record. 
""" - geo_distance: int = Field(alias="geoDistance") + geo_distance: int """ Distance between the geo location in the search query and the best matching geo location in the record, divided by the geo precision (in meters). """ - geo_precision: Optional[int] = Field(default=None, alias="geoPrecision") + geo_precision: Optional[int] = None """ Precision used when computing the geo distance, in meters. """ - matched_geo_location: Optional[MatchedGeoLocation] = Field( - default=None, alias="matchedGeoLocation" - ) - personalization: Optional[Personalization] = Field( - default=None, alias="personalization" - ) - nb_exact_words: int = Field(alias="nbExactWords") + matched_geo_location: Optional[MatchedGeoLocation] = None + personalization: Optional[Personalization] = None + nb_exact_words: int """ Number of exactly matched words. """ - nb_typos: int = Field(alias="nbTypos") + nb_typos: int """ Number of typos encountered when matching the record. """ - promoted: Optional[bool] = Field(default=None, alias="promoted") + promoted: Optional[bool] = None """ Whether the record was promoted by a rule. """ - proximity_distance: Optional[int] = Field(default=None, alias="proximityDistance") + proximity_distance: Optional[int] = None """ Number of words between multiple matches in the query plus 1. For single word queries, `proximityDistance` is 0. """ - user_score: int = Field(alias="userScore") + user_score: int """ Overall ranking of the record, expressed as a single integer. This attribute is internal. """ - words: Optional[int] = Field(default=None, alias="words") + words: Optional[int] = None """ Number of matched words. """ - promoted_by_re_ranking: Optional[bool] = Field( - default=None, alias="promotedByReRanking" - ) + promoted_by_re_ranking: Optional[bool] = None """ Whether the record is re-ranked. 
""" model_config = ConfigDict( @@ -63,6 +77,7 @@ class RankingInfo(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/re_ranking_apply_filter.py b/algoliasearch/search/models/re_ranking_apply_filter.py index 39053345c..5718f7ba2 100644 --- a/algoliasearch/search/models/re_ranking_apply_filter.py +++ b/algoliasearch/search/models/re_ranking_apply_filter.py @@ -27,7 +27,7 @@ class ReRankingApplyFilter(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[ReRankingApplyFilter], str]] = None + actual_instance: Union[List[ReRankingApplyFilter], str, None] = None one_of_schemas: Set[str] = {"List[ReRankingApplyFilter]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,14 +40,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[List[ReRankingApplyFilter], str]]: + ) -> Union[List[ReRankingApplyFilter], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -104,8 +104,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/redirect.py b/algoliasearch/search/models/redirect.py index 30c032127..878516d0e 100644 --- a/algoliasearch/search/models/redirect.py +++ b/algoliasearch/search/models/redirect.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,21 +22,28 @@ RedirectRuleIndexMetadata, ) +_ALIASES = { + "index": "index", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Redirect(BaseModel): """ [Redirect results to a URL](https://www.algolia.com/doc/guides/managing-results/rules/merchandising-and-promoting/how-to/redirects/), this this parameter is for internal use only. 
""" - index: Optional[List[RedirectRuleIndexMetadata]] = Field( - default=None, alias="index" - ) + index: Optional[List[RedirectRuleIndexMetadata]] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/redirect_rule_index_data.py b/algoliasearch/search/models/redirect_rule_index_data.py index bb6826e62..27ae7d300 100644 --- a/algoliasearch/search/models/redirect_rule_index_data.py +++ b/algoliasearch/search/models/redirect_rule_index_data.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "rule_object_id": "ruleObjectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RedirectRuleIndexData(BaseModel): """ Redirect rule data. 
""" - rule_object_id: str = Field(alias="ruleObjectID") + rule_object_id: str model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/redirect_rule_index_metadata.py b/algoliasearch/search/models/redirect_rule_index_metadata.py index 2ae66259e..ba4c9edfe 100644 --- a/algoliasearch/search/models/redirect_rule_index_metadata.py +++ b/algoliasearch/search/models/redirect_rule_index_metadata.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,27 +20,40 @@ from algoliasearch.search.models.redirect_rule_index_data import RedirectRuleIndexData +_ALIASES = { + "source": "source", + "dest": "dest", + "reason": "reason", + "succeed": "succeed", + "data": "data", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class RedirectRuleIndexMetadata(BaseModel): """ RedirectRuleIndexMetadata """ - source: str = Field(alias="source") + source: str """ Source index for the redirect rule. """ - dest: str = Field(alias="dest") + dest: str """ Destination index for the redirect rule. """ - reason: str = Field(alias="reason") + reason: str """ Reason for the redirect rule. """ - succeed: bool = Field(alias="succeed") + succeed: bool """ Redirect rule status. 
""" - data: RedirectRuleIndexData = Field(alias="data") + data: RedirectRuleIndexData model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/redirect_url.py b/algoliasearch/search/models/redirect_url.py index fce44987e..f3d9ab8bd 100644 --- a/algoliasearch/search/models/redirect_url.py +++ b/algoliasearch/search/models/redirect_url.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,28 @@ from typing_extensions import Self +_ALIASES = { + "url": "url", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RedirectURL(BaseModel): """ The redirect rule container. """ - url: Optional[str] = Field(default=None, alias="url") + url: Optional[str] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/remove_stop_words.py b/algoliasearch/search/models/remove_stop_words.py index 828055465..112f6f768 100644 --- a/algoliasearch/search/models/remove_stop_words.py +++ b/algoliasearch/search/models/remove_stop_words.py @@ -30,7 +30,7 @@ class RemoveStopWords(BaseModel): """ ISO code for languages for which stop words should be removed. This overrides languages you set in `queryLanguges`. """ oneof_schema_2_validator: Optional[bool] = Field(default=None) """ If true, stop words are removed for all languages you included in `queryLanguages`, or for all supported languages, if `queryLanguages` is empty. If false, stop words are not removed. 
""" - actual_instance: Optional[Union[List[SupportedLanguage], bool]] = None + actual_instance: Union[List[SupportedLanguage], bool, None] = None one_of_schemas: Set[str] = {"List[SupportedLanguage]", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[SupportedLanguage], bool]]: + def unwrap_actual_instance( + self, + ) -> Union[List[SupportedLanguage], bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -91,9 +93,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -103,8 +105,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[SupportedLanguage], boo return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/remove_user_id_response.py b/algoliasearch/search/models/remove_user_id_response.py index 29659a3fd..9b3e298de 100644 --- a/algoliasearch/search/models/remove_user_id_response.py +++ b/algoliasearch/search/models/remove_user_id_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, 
ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "deleted_at": "deletedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class RemoveUserIdResponse(BaseModel): """ RemoveUserIdResponse """ - deleted_at: str = Field(alias="deletedAt") + deleted_at: str """ Date and time when the object was deleted, in RFC 3339 format. """ model_config = ConfigDict( @@ -31,6 +40,7 @@ class RemoveUserIdResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/rendering_content.py b/algoliasearch/search/models/rendering_content.py index 81782d2c6..4795103b5 100644 --- a/algoliasearch/search/models/rendering_content.py +++ b/algoliasearch/search/models/rendering_content.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,6 +20,17 @@ from algoliasearch.search.models.facet_ordering import FacetOrdering from algoliasearch.search.models.redirect_url import RedirectURL +from algoliasearch.search.models.widgets import Widgets + +_ALIASES = { + "facet_ordering": "facetOrdering", + "redirect": "redirect", + "widgets": "widgets", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) class RenderingContent(BaseModel): @@ -27,14 +38,16 @@ class RenderingContent(BaseModel): Extra data that can be used in the search UI. You can use this to control aspects of your search UI, such as, the order of facet names and values without changing your frontend code. 
""" - facet_ordering: Optional[FacetOrdering] = Field(default=None, alias="facetOrdering") - redirect: Optional[RedirectURL] = Field(default=None, alias="redirect") + facet_ordering: Optional[FacetOrdering] = None + redirect: Optional[RedirectURL] = None + widgets: Optional[Widgets] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: @@ -72,5 +85,10 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: if obj.get("redirect") is not None else None ) + obj["widgets"] = ( + Widgets.from_dict(obj["widgets"]) + if obj.get("widgets") is not None + else None + ) return cls.model_validate(obj) diff --git a/algoliasearch/search/models/replace_all_objects_response.py b/algoliasearch/search/models/replace_all_objects_response.py index 89cb3363f..4abdd1cef 100644 --- a/algoliasearch/search/models/replace_all_objects_response.py +++ b/algoliasearch/search/models/replace_all_objects_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,22 +21,33 @@ from algoliasearch.search.models.batch_response import BatchResponse from algoliasearch.search.models.updated_at_response import UpdatedAtResponse +_ALIASES = { + "copy_operation_response": "copyOperationResponse", + "batch_responses": "batchResponses", + "move_operation_response": "moveOperationResponse", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class ReplaceAllObjectsResponse(BaseModel): """ ReplaceAllObjectsResponse """ - copy_operation_response: UpdatedAtResponse = Field(alias="copyOperationResponse") - batch_responses: List[BatchResponse] = Field(alias="batchResponses") + copy_operation_response: UpdatedAtResponse + 
batch_responses: List[BatchResponse] """ The response of the `batch` request(s). """ - move_operation_response: UpdatedAtResponse = Field(alias="moveOperationResponse") + move_operation_response: UpdatedAtResponse model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/replace_source_response.py b/algoliasearch/search/models/replace_source_response.py index b422652a4..2c9ecbfe4 100644 --- a/algoliasearch/search/models/replace_source_response.py +++ b/algoliasearch/search/models/replace_source_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class ReplaceSourceResponse(BaseModel): """ ReplaceSourceResponse """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class ReplaceSourceResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/rule.py b/algoliasearch/search/models/rule.py index 6dee4a44b..fdf5adc1a 100644 --- a/algoliasearch/search/models/rule.py +++ b/algoliasearch/search/models/rule.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -22,22 +22,35 @@ from algoliasearch.search.models.consequence import Consequence from algoliasearch.search.models.time_range import TimeRange +_ALIASES = { + "object_id": "objectID", + "conditions": "conditions", + "consequence": "consequence", + "description": "description", + "enabled": "enabled", + "validity": "validity", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Rule(BaseModel): """ Rule object. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique identifier of a rule object. """ - conditions: Optional[List[Condition]] = Field(default=None, alias="conditions") + conditions: Optional[List[Condition]] = None """ Conditions that trigger a rule. Some consequences require specific conditions or don't require any condition. For more information, see [Conditions](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/#conditions). """ - consequence: Optional[Consequence] = Field(default=None, alias="consequence") - description: Optional[str] = Field(default=None, alias="description") + consequence: Optional[Consequence] = None + description: Optional[str] = None """ Description of the rule's purpose to help you distinguish between different rules. 
""" - enabled: Optional[bool] = Field(default=None, alias="enabled") + enabled: Optional[bool] = None """ Whether the rule is active. """ - validity: Optional[List[TimeRange]] = Field(default=None, alias="validity") + validity: Optional[List[TimeRange]] = None """ Time periods when the rule is active. """ model_config = ConfigDict( @@ -45,6 +58,7 @@ class Rule(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/save_object_response.py b/algoliasearch/search/models/save_object_response.py index 7d9d757a8..cb43f3f1a 100644 --- a/algoliasearch/search/models/save_object_response.py +++ b/algoliasearch/search/models/save_object_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "created_at": "createdAt", + "task_id": "taskID", + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SaveObjectResponse(BaseModel): """ SaveObjectResponse """ - created_at: str = Field(alias="createdAt") + created_at: str """ Date and time when the object was created, in RFC 3339 format. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - object_id: Optional[str] = Field(default=None, alias="objectID") + object_id: Optional[str] = None """ Unique record identifier. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SaveObjectResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/save_synonym_response.py b/algoliasearch/search/models/save_synonym_response.py index 9bf9e357a..83a626078 100644 --- a/algoliasearch/search/models/save_synonym_response.py +++ b/algoliasearch/search/models/save_synonym_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "updated_at": "updatedAt", + "id": "id", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SaveSynonymResponse(BaseModel): """ SaveSynonymResponse """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. """ - id: str = Field(alias="id") + id: str """ Unique identifier of a synonym object. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SaveSynonymResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_dictionary_entries_params.py b/algoliasearch/search/models/search_dictionary_entries_params.py index 955fb69a1..f4a934741 100644 --- a/algoliasearch/search/models/search_dictionary_entries_params.py +++ b/algoliasearch/search/models/search_dictionary_entries_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,25 +20,37 @@ from algoliasearch.search.models.supported_language import SupportedLanguage +_ALIASES = { + "query": "query", + "page": "page", + "hits_per_page": "hitsPerPage", + "language": "language", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchDictionaryEntriesParams(BaseModel): """ Search parameter. """ - query: str = Field(alias="query") + query: str """ Search query. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. 
""" - language: Optional[SupportedLanguage] = Field(default=None, alias="language") + language: Optional[SupportedLanguage] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_dictionary_entries_response.py b/algoliasearch/search/models/search_dictionary_entries_response.py index 7b96fa125..20803514d 100644 --- a/algoliasearch/search/models/search_dictionary_entries_response.py +++ b/algoliasearch/search/models/search_dictionary_entries_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.search.models.dictionary_entry import DictionaryEntry +_ALIASES = { + "hits": "hits", + "page": "page", + "nb_hits": "nbHits", + "nb_pages": "nbPages", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchDictionaryEntriesResponse(BaseModel): """ SearchDictionaryEntriesResponse """ - hits: List[DictionaryEntry] = Field(alias="hits") + hits: List[DictionaryEntry] """ Dictionary entries matching the search criteria. """ - page: int = Field(alias="page") + page: int """ Requested page of the API response. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). """ - nb_pages: int = Field(alias="nbPages") + nb_pages: int """ Number of pages of results. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class SearchDictionaryEntriesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_for_facet_values_request.py b/algoliasearch/search/models/search_for_facet_values_request.py index 3a83de727..920e93042 100644 --- a/algoliasearch/search/models/search_for_facet_values_request.py +++ b/algoliasearch/search/models/search_for_facet_values_request.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "params": "params", + "facet_query": "facetQuery", + "max_facet_hits": "maxFacetHits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SearchForFacetValuesRequest(BaseModel): """ SearchForFacetValuesRequest """ - params: Optional[str] = Field(default=None, alias="params") + params: Optional[str] = None """ Search parameters as a URL-encoded query string. """ - facet_query: Optional[str] = Field(default=None, alias="facetQuery") + facet_query: Optional[str] = None """ Text to search inside the facet's values. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class SearchForFacetValuesRequest(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_for_facet_values_response.py b/algoliasearch/search/models/search_for_facet_values_response.py index 2afdd0002..014eb61d8 100644 --- a/algoliasearch/search/models/search_for_facet_values_response.py +++ b/algoliasearch/search/models/search_for_facet_values_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,17 +20,27 @@ from algoliasearch.search.models.facet_hits import FacetHits +_ALIASES = { + "facet_hits": "facetHits", + "exhaustive_facets_count": "exhaustiveFacetsCount", + "processing_time_ms": "processingTimeMS", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchForFacetValuesResponse(BaseModel): """ SearchForFacetValuesResponse """ - facet_hits: List[FacetHits] = Field(alias="facetHits") + facet_hits: List[FacetHits] """ Matching facet values. """ - exhaustive_facets_count: bool = Field(alias="exhaustiveFacetsCount") + exhaustive_facets_count: bool """ Whether the facet count is exhaustive (true) or approximate (false). For more information, see [Why are my facet and hit counts not accurate](https://support.algolia.com/hc/en-us/articles/4406975248145-Why-are-my-facet-and-hit-counts-not-accurate-). """ - processing_time_ms: Optional[int] = Field(default=None, alias="processingTimeMS") + processing_time_ms: Optional[int] = None """ Time the server took to process the request, in milliseconds. 
""" model_config = ConfigDict( @@ -38,6 +48,7 @@ class SearchForFacetValuesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_for_facets.py b/algoliasearch/search/models/search_for_facets.py index 49a8becf2..aff7628ad 100644 --- a/algoliasearch/search/models/search_for_facets.py +++ b/algoliasearch/search/models/search_for_facets.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -44,236 +44,251 @@ from algoliasearch.search.models.tag_filters import TagFilters from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "params": "params", + "query": "query", + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "page": "page", + "offset": "offset", + "length": "length", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + 
"analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", + "enable_ab_test": "enableABTest", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + 
"attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", + "facet": "facet", + "index_name": "indexName", + "facet_query": "facetQuery", + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchForFacets(BaseModel): """ SearchForFacets """ - params: Optional[str] = Field(default=None, alias="params") + params: Optional[str] = None """ Search parameters as a URL-encoded query string. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. 
You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). 
""" - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - offset: Optional[int] = Field(default=None, alias="offset") + offset: Optional[int] = None """ Position of the first hit to retrieve. """ - length: Optional[int] = Field(default=None, alias="length") + length: Optional[int] = None """ Number of hits to retrieve (used in combination with `offset`). """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. 
Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. """ - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. 
For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. """ - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. [Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. 
For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. """ - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. """ - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. 
Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. 
Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. 
If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. 
For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. 
""" - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. 
- If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. """ - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. 
Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. 
For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. """ - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). 
""" - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. """ - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) - facet: str = Field(alias="facet") + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None + facet: str """ Facet name. """ - index_name: str = Field(alias="indexName") + index_name: str """ Index name (case-sensitive). """ - facet_query: Optional[str] = Field(default=None, alias="facetQuery") + facet_query: Optional[str] = None """ Text to search inside the facet's values. 
""" - type: SearchTypeFacet = Field(alias="type") + type: SearchTypeFacet model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_for_hits.py b/algoliasearch/search/models/search_for_hits.py index 9fc5e3d7a..70ea1de6d 100644 --- a/algoliasearch/search/models/search_for_hits.py +++ b/algoliasearch/search/models/search_for_hits.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -44,232 +44,245 @@ from algoliasearch.search.models.tag_filters import TagFilters from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "params": "params", + "query": "query", + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "page": "page", + "offset": "offset", + "length": "length", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": 
"analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", + "enable_ab_test": "enableABTest", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + 
"attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", + "index_name": "indexName", + "type": "type", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchForHits(BaseModel): """ SearchForHits """ - params: Optional[str] = Field(default=None, alias="params") + params: Optional[str] = None """ Search parameters as a URL-encoded query string. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a more broader search. Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. 
You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). """ - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). 
""" - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - offset: Optional[int] = Field(default=None, alias="offset") + offset: Optional[int] = None """ Position of the first hit to retrieve. """ - length: Optional[int] = Field(default=None, alias="length") + length: Optional[int] = None """ Number of hits to retrieve (used in combination with `offset`). """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. 
Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. """ - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. 
For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. """ - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. [Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. 
For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. """ - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. """ - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. 
Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. 
Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. 
If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. 
For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. 
""" - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. 
- If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words divided by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelihood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. """ - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`.
Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurrences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart.
For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. """ - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). 
""" - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. """ - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) - index_name: str = Field(alias="indexName") + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None + index_name: str """ Index name (case-sensitive). 
""" - type: Optional[SearchTypeDefault] = Field(default=None, alias="type") + type: Optional[SearchTypeDefault] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_method_params.py b/algoliasearch/search/models/search_method_params.py index 10884527b..c3017c2e3 100644 --- a/algoliasearch/search/models/search_method_params.py +++ b/algoliasearch/search/models/search_method_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -21,20 +21,30 @@ from algoliasearch.search.models.search_query import SearchQuery from algoliasearch.search.models.search_strategy import SearchStrategy +_ALIASES = { + "requests": "requests", + "strategy": "strategy", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchMethodParams(BaseModel): """ SearchMethodParams """ - requests: List[SearchQuery] = Field(alias="requests") - strategy: Optional[SearchStrategy] = Field(default=None, alias="strategy") + requests: List[SearchQuery] + strategy: Optional[SearchStrategy] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_params.py b/algoliasearch/search/models/search_params.py index 597003a07..89b925274 100644 --- a/algoliasearch/search/models/search_params.py +++ b/algoliasearch/search/models/search_params.py @@ -31,7 +31,7 @@ class SearchParams(BaseModel): oneof_schema_2_validator: Optional[SearchParamsObject] = Field(default=None) - actual_instance: Optional[Union[SearchParamsObject, 
SearchParamsString]] = None + actual_instance: Union[SearchParamsObject, SearchParamsString, None] = None one_of_schemas: Set[str] = {"SearchParamsObject", "SearchParamsString"} def __init__(self, *args, **kwargs) -> None: @@ -44,14 +44,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[SearchParamsObject, SearchParamsString]]: + ) -> Union[SearchParamsObject, SearchParamsString, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. """ @@ -92,9 +92,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -106,8 +106,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/search_params_object.py b/algoliasearch/search/models/search_params_object.py index 91d977052..cf35c42ec 100644 --- a/algoliasearch/search/models/search_params_object.py +++ b/algoliasearch/search/models/search_params_object.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -43,227 
+43,237 @@ from algoliasearch.search.models.tag_filters import TagFilters from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "query": "query", + "similar_query": "similarQuery", + "filters": "filters", + "facet_filters": "facetFilters", + "optional_filters": "optionalFilters", + "numeric_filters": "numericFilters", + "tag_filters": "tagFilters", + "sum_or_filters_scores": "sumOrFiltersScores", + "restrict_searchable_attributes": "restrictSearchableAttributes", + "facets": "facets", + "faceting_after_distinct": "facetingAfterDistinct", + "page": "page", + "offset": "offset", + "length": "length", + "around_lat_lng": "aroundLatLng", + "around_lat_lng_via_ip": "aroundLatLngViaIP", + "around_radius": "aroundRadius", + "around_precision": "aroundPrecision", + "minimum_around_radius": "minimumAroundRadius", + "inside_bounding_box": "insideBoundingBox", + "inside_polygon": "insidePolygon", + "natural_languages": "naturalLanguages", + "rule_contexts": "ruleContexts", + "personalization_impact": "personalizationImpact", + "user_token": "userToken", + "get_ranking_info": "getRankingInfo", + "synonyms": "synonyms", + "click_analytics": "clickAnalytics", + "analytics": "analytics", + "analytics_tags": "analyticsTags", + "percentile_computation": "percentileComputation", + "enable_ab_test": "enableABTest", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": "attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": 
"typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchParamsObject(BaseModel): """ Each parameter value, including the `query` must not be larger than 512 bytes. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - similar_query: Optional[str] = Field(default=None, alias="similarQuery") + similar_query: Optional[str] = None """ Keywords to be used instead of the search query to conduct a broader search.
Using the `similarQuery` parameter changes other settings: - `queryType` is set to `prefixNone`. - `removeStopWords` is set to true. - `words` is set as the first ranking criterion. - All remaining words are treated as `optionalWords`. Since the `similarQuery` is supposed to do a broad search, they usually return many results. Combine it with `filters` to narrow down the list of results. """ - filters: Optional[str] = Field(default=None, alias="filters") + filters: Optional[str] = None """ Filter expression to only include items that match the filter criteria in the response. You can use these filter expressions: - **Numeric filters.** ` `, where `` is one of `<`, `<=`, `=`, `!=`, `>`, `>=`. - **Ranges.** `: TO ` where `` and `` are the lower and upper limits of the range (inclusive). - **Facet filters.** `:` where `` is a facet attribute (case-sensitive) and `` a facet value. - **Tag filters.** `_tags:` or just `` (case-sensitive). - **Boolean filters.** `: true | false`. You can combine filters with `AND`, `OR`, and `NOT` operators with the following restrictions: - You can only combine filters of the same type with `OR`. **Not supported:** `facet:value OR num > 3`. - You can't use `NOT` with combinations of filters. **Not supported:** `NOT(facet:value OR facet:value)` - You can't combine conjunctions (`AND`) with `OR`. **Not supported:** `facet:value OR (facet:value AND facet:value)` Use quotes around your filters, if the facet attribute name or facet value has spaces, keywords (`OR`, `AND`, `NOT`), or quotes. If a facet attribute is an array, the filter matches if it matches at least one element of the array. For more information, see [Filters](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/). 
""" - facet_filters: Optional[FacetFilters] = Field(default=None, alias="facetFilters") - optional_filters: Optional[OptionalFilters] = Field( - default=None, alias="optionalFilters" - ) - numeric_filters: Optional[NumericFilters] = Field( - default=None, alias="numericFilters" - ) - tag_filters: Optional[TagFilters] = Field(default=None, alias="tagFilters") - sum_or_filters_scores: Optional[bool] = Field( - default=None, alias="sumOrFiltersScores" - ) + facet_filters: Optional[FacetFilters] = None + optional_filters: Optional[OptionalFilters] = None + numeric_filters: Optional[NumericFilters] = None + tag_filters: Optional[TagFilters] = None + sum_or_filters_scores: Optional[bool] = None """ Whether to sum all filter scores. If true, all filter scores are summed. Otherwise, the maximum filter score is kept. For more information, see [filter scores](https://www.algolia.com/doc/guides/managing-results/refine-results/filtering/in-depth/filter-scoring/#accumulating-scores-with-sumorfiltersscores). """ - restrict_searchable_attributes: Optional[List[str]] = Field( - default=None, alias="restrictSearchableAttributes" - ) + restrict_searchable_attributes: Optional[List[str]] = None """ Restricts a search to a subset of your searchable attributes. Attribute names are case-sensitive. """ - facets: Optional[List[str]] = Field(default=None, alias="facets") + facets: Optional[List[str]] = None """ Facets for which to retrieve facet values that match the search criteria and the number of matching facet values. To retrieve all facets, use the wildcard character `*`. For more information, see [facets](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#contextual-facet-values-and-counts). """ - faceting_after_distinct: Optional[bool] = Field( - default=None, alias="facetingAfterDistinct" - ) + faceting_after_distinct: Optional[bool] = None """ Whether faceting should be applied after deduplication with `distinct`. 
This leads to accurate facet counts when using faceting in combination with `distinct`. It's usually better to use `afterDistinct` modifiers in the `attributesForFaceting` setting, as `facetingAfterDistinct` only computes correct facet counts if all records have the same facet values for the `attributeForDistinct`. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - offset: Optional[int] = Field(default=None, alias="offset") + offset: Optional[int] = None """ Position of the first hit to retrieve. """ - length: Optional[int] = Field(default=None, alias="length") + length: Optional[int] = None """ Number of hits to retrieve (used in combination with `offset`). """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Coordinates for the center of a circle, expressed as a comma-separated string of latitude and longitude. Only records included within circle around this central location are included in the results. The radius of the circle is determined by the `aroundRadius` and `minimumAroundRadius` settings. This parameter is ignored if you also specify `insidePolygon` or `insideBoundingBox`. """ - around_lat_lng_via_ip: Optional[bool] = Field( - default=None, alias="aroundLatLngViaIP" - ) + around_lat_lng_via_ip: Optional[bool] = None """ Whether to obtain the coordinates from the request's IP address. """ - around_radius: Optional[AroundRadius] = Field(default=None, alias="aroundRadius") - around_precision: Optional[AroundPrecision] = Field( - default=None, alias="aroundPrecision" - ) - minimum_around_radius: Optional[int] = Field( - default=None, alias="minimumAroundRadius" - ) + around_radius: Optional[AroundRadius] = None + around_precision: Optional[AroundPrecision] = None + minimum_around_radius: Optional[int] = None """ Minimum radius (in meters) for a search around a location when `aroundRadius` isn't set. 
""" - inside_bounding_box: Optional[List[List[float]]] = Field( - default=None, alias="insideBoundingBox" - ) + inside_bounding_box: Optional[List[List[float]]] = None """ Coordinates for a rectangular area in which to search. Each bounding box is defined by the two opposite points of its diagonal, and expressed as latitude and longitude pair: `[p1 lat, p1 long, p2 lat, p2 long]`. Provide multiple bounding boxes as nested arrays. For more information, see [rectangular area](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). """ - inside_polygon: Optional[List[List[float]]] = Field( - default=None, alias="insidePolygon" - ) + inside_polygon: Optional[List[List[float]]] = None """ Coordinates of a polygon in which to search. Polygons are defined by 3 to 10,000 points. Each point is represented by its latitude and longitude. Provide multiple polygons as nested arrays. For more information, see [filtering inside polygons](https://www.algolia.com/doc/guides/managing-results/refine-results/geolocation/#filtering-inside-rectangular-or-polygonal-areas). This parameter is ignored if you also specify `insideBoundingBox`. """ - natural_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="naturalLanguages" - ) + natural_languages: Optional[List[SupportedLanguage]] = None """ ISO language codes that adjust settings that are useful for processing natural language queries (as opposed to keyword searches): - Sets `removeStopWords` and `ignorePlurals` to the list of provided languages. - Sets `removeWordsIfNoResults` to `allOptional`. - Adds a `natural_language` attribute to `ruleContexts` and `analyticsTags`. """ - rule_contexts: Optional[List[str]] = Field(default=None, alias="ruleContexts") + rule_contexts: Optional[List[str]] = None """ Assigns a rule context to the search query. 
[Rule contexts](https://www.algolia.com/doc/guides/managing-results/rules/rules-overview/how-to/customize-search-results-by-platform/#whats-a-context) are strings that you can use to trigger matching rules. """ - personalization_impact: Optional[int] = Field( - default=None, alias="personalizationImpact" - ) + personalization_impact: Optional[int] = None """ Impact that Personalization should have on this search. The higher this value is, the more Personalization determines the ranking compared to other factors. For more information, see [Understanding Personalization impact](https://www.algolia.com/doc/guides/personalization/personalizing-results/in-depth/configuring-personalization/#understanding-personalization-impact). """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Unique pseudonymous or anonymous user identifier. This helps with analytics and click and conversion events. For more information, see [user token](https://www.algolia.com/doc/guides/sending-events/concepts/usertoken/). """ - get_ranking_info: Optional[bool] = Field(default=None, alias="getRankingInfo") + get_ranking_info: Optional[bool] = None """ Whether the search response should include detailed ranking information. """ - synonyms: Optional[bool] = Field(default=None, alias="synonyms") + synonyms: Optional[bool] = None """ Whether to take into account an index's synonyms for this search. """ - click_analytics: Optional[bool] = Field(default=None, alias="clickAnalytics") + click_analytics: Optional[bool] = None """ Whether to include a `queryID` attribute in the response. The query ID is a unique identifier for a search query and is required for tracking [click and conversion events](https://www.algolia.com/guides/sending-events/getting-started/). """ - analytics: Optional[bool] = Field(default=None, alias="analytics") + analytics: Optional[bool] = None """ Whether this search will be included in Analytics. 
""" - analytics_tags: Optional[List[str]] = Field(default=None, alias="analyticsTags") + analytics_tags: Optional[List[str]] = None """ Tags to apply to the query for [segmenting analytics data](https://www.algolia.com/doc/guides/search-analytics/guides/segments/). """ - percentile_computation: Optional[bool] = Field( - default=None, alias="percentileComputation" - ) + percentile_computation: Optional[bool] = None """ Whether to include this search when calculating processing-time percentiles. """ - enable_ab_test: Optional[bool] = Field(default=None, alias="enableABTest") + enable_ab_test: Optional[bool] = None """ Whether to enable A/B testing for this search. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. """ - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. 
Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. 
""" - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. 
""" - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. 
""" - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. """ - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. 
This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. 
""" - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. """ - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. 
For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. - If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. 
""" - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. 
For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. 
""" - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). """ - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. 
""" - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_params_string.py b/algoliasearch/search/models/search_params_string.py index c9c258e19..9b1ea1545 100644 --- a/algoliasearch/search/models/search_params_string.py +++ b/algoliasearch/search/models/search_params_string.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "params": "params", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SearchParamsString(BaseModel): """ Search parameters as query string. """ - params: Optional[str] = Field(default=None, alias="params") + params: Optional[str] = None """ Search parameters as a URL-encoded query string. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class SearchParamsString(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_query.py b/algoliasearch/search/models/search_query.py index 86c6d7e28..57973b7be 100644 --- a/algoliasearch/search/models/search_query.py +++ b/algoliasearch/search/models/search_query.py @@ -31,7 +31,7 @@ class SearchQuery(BaseModel): oneof_schema_2_validator: Optional[SearchForFacets] = Field(default=None) - actual_instance: Optional[Union[SearchForFacets, SearchForHits]] = None + actual_instance: Union[SearchForFacets, SearchForHits, None] = None one_of_schemas: Set[str] = {"SearchForFacets", "SearchForHits"} def __init__(self, *args, **kwargs) -> None: @@ -44,12 +44,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[SearchForFacets, SearchForHits]]: + def unwrap_actual_instance( + self, + ) -> Union[SearchForFacets, SearchForHits, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +92,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -104,8 +106,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/search_response.py b/algoliasearch/search/models/search_response.py index b06eeccd3..8e7351ac8 100644 --- a/algoliasearch/search/models/search_response.py +++ b/algoliasearch/search/models/search_response.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -25,80 +25,111 @@ from algoliasearch.search.models.redirect import Redirect from algoliasearch.search.models.rendering_content import RenderingContent +_ALIASES = { + "ab_test_id": "abTestID", + "ab_test_variant_id": "abTestVariantID", + "around_lat_lng": "aroundLatLng", + "automatic_radius": "automaticRadius", + "exhaustive": "exhaustive", + "exhaustive_facets_count": "exhaustiveFacetsCount", + "exhaustive_nb_hits": "exhaustiveNbHits", + "exhaustive_typo": "exhaustiveTypo", + "facets": "facets", + "facets_stats": "facets_stats", + "index": "index", + "index_used": "indexUsed", + "message": "message", + "nb_sorted_hits": "nbSortedHits", + "parsed_query": "parsedQuery", + "processing_time_ms": "processingTimeMS", + "processing_timings_ms": 
"processingTimingsMS", + "query_after_removal": "queryAfterRemoval", + "redirect": "redirect", + "rendering_content": "renderingContent", + "server_time_ms": "serverTimeMS", + "server_used": "serverUsed", + "user_data": "userData", + "query_id": "queryID", + "automatic_insights": "_automaticInsights", + "page": "page", + "nb_hits": "nbHits", + "nb_pages": "nbPages", + "hits_per_page": "hitsPerPage", + "hits": "hits", + "query": "query", + "params": "params", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchResponse(BaseModel): """ SearchResponse """ - ab_test_id: Optional[int] = Field(default=None, alias="abTestID") + ab_test_id: Optional[int] = None """ A/B test ID. This is only included in the response for indices that are part of an A/B test. """ - ab_test_variant_id: Optional[int] = Field(default=None, alias="abTestVariantID") + ab_test_variant_id: Optional[int] = None """ Variant ID. This is only included in the response for indices that are part of an A/B test. """ - around_lat_lng: Optional[str] = Field(default=None, alias="aroundLatLng") + around_lat_lng: Optional[str] = None """ Computed geographical location. """ - automatic_radius: Optional[str] = Field(default=None, alias="automaticRadius") + automatic_radius: Optional[str] = None """ Distance from a central coordinate provided by `aroundLatLng`. """ - exhaustive: Optional[Exhaustive] = Field(default=None, alias="exhaustive") - exhaustive_facets_count: Optional[bool] = Field( - default=None, alias="exhaustiveFacetsCount" - ) + exhaustive: Optional[Exhaustive] = None + exhaustive_facets_count: Optional[bool] = None """ See the `facetsCount` field of the `exhaustive` object in the response. """ - exhaustive_nb_hits: Optional[bool] = Field(default=None, alias="exhaustiveNbHits") + exhaustive_nb_hits: Optional[bool] = None """ See the `nbHits` field of the `exhaustive` object in the response. 
""" - exhaustive_typo: Optional[bool] = Field(default=None, alias="exhaustiveTypo") + exhaustive_typo: Optional[bool] = None """ See the `typo` field of the `exhaustive` object in the response. """ - facets: Optional[Dict[str, Dict[str, int]]] = Field(default=None, alias="facets") + facets: Optional[Dict[str, Dict[str, int]]] = None """ Facet counts. """ - facets_stats: Optional[Dict[str, FacetStats]] = Field( - default=None, alias="facets_stats" - ) + facets_stats: Optional[Dict[str, FacetStats]] = None """ Statistics for numerical facets. """ - index: Optional[str] = Field(default=None, alias="index") + index: Optional[str] = None """ Index name used for the query. """ - index_used: Optional[str] = Field(default=None, alias="indexUsed") + index_used: Optional[str] = None """ Index name used for the query. During A/B testing, the targeted index isn't always the index used by the query. """ - message: Optional[str] = Field(default=None, alias="message") + message: Optional[str] = None """ Warnings about the query. """ - nb_sorted_hits: Optional[int] = Field(default=None, alias="nbSortedHits") + nb_sorted_hits: Optional[int] = None """ Number of hits selected and sorted by the relevant sort algorithm. """ - parsed_query: Optional[str] = Field(default=None, alias="parsedQuery") + parsed_query: Optional[str] = None """ Post-[normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/#what-does-normalization-mean) query string that will be searched. """ - processing_time_ms: int = Field(alias="processingTimeMS") + processing_time_ms: int """ Time the server took to process the request, in milliseconds. """ - processing_timings_ms: Optional[object] = Field( - default=None, alias="processingTimingsMS" - ) + processing_timings_ms: Optional[object] = None """ Experimental. List of processing steps and their times, in milliseconds. You can use this list to investigate performance issues. 
""" - query_after_removal: Optional[str] = Field(default=None, alias="queryAfterRemoval") + query_after_removal: Optional[str] = None """ Markup text indicating which parts of the original query have been removed to retrieve a non-empty result set. """ - redirect: Optional[Redirect] = Field(default=None, alias="redirect") - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - server_time_ms: Optional[int] = Field(default=None, alias="serverTimeMS") + redirect: Optional[Redirect] = None + rendering_content: Optional[RenderingContent] = None + server_time_ms: Optional[int] = None """ Time the server took to process the request, in milliseconds. """ - server_used: Optional[str] = Field(default=None, alias="serverUsed") + server_used: Optional[str] = None """ Host name of the server that processed the request. """ - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - query_id: Optional[str] = Field(default=None, alias="queryID") + query_id: Optional[str] = None """ Unique identifier for the query. This is used for [click analytics](https://www.algolia.com/doc/guides/analytics/click-analytics/). """ - automatic_insights: Optional[bool] = Field(default=None, alias="_automaticInsights") + automatic_insights: Optional[bool] = None """ Whether automatic events collection is enabled for the application. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - nb_hits: Optional[int] = Field(default=None, alias="nbHits") + nb_hits: Optional[int] = None """ Number of results (hits). """ - nb_pages: Optional[int] = Field(default=None, alias="nbPages") + nb_pages: Optional[int] = None """ Number of pages of results. 
""" - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - hits: List[Hit] = Field(alias="hits") + hits: List[Hit] """ Search results (hits). Hits are records from your index that match the search criteria, augmented with additional attributes, such as, for highlighting. """ - query: str = Field(alias="query") + query: str """ Search query. """ - params: str = Field(alias="params") + params: str """ URL-encoded string of all search parameters. """ @field_validator("around_lat_lng") @@ -118,6 +149,7 @@ def around_lat_lng_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/search/models/search_responses.py b/algoliasearch/search/models/search_responses.py index 99d0d7eba..835e70aab 100644 --- a/algoliasearch/search/models/search_responses.py +++ b/algoliasearch/search/models/search_responses.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,28 @@ from algoliasearch.search.models.search_result import SearchResult +_ALIASES = { + "results": "results", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchResponses(BaseModel): """ SearchResponses """ - results: List[SearchResult] = Field(alias="results") + results: List[SearchResult] model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_result.py b/algoliasearch/search/models/search_result.py index e23536f3f..6cb14df77 100644 --- 
a/algoliasearch/search/models/search_result.py +++ b/algoliasearch/search/models/search_result.py @@ -35,9 +35,7 @@ class SearchResult(BaseModel): default=None ) - actual_instance: Optional[Union[SearchForFacetValuesResponse, SearchResponse]] = ( - None - ) + actual_instance: Union[SearchForFacetValuesResponse, SearchResponse, None] = None one_of_schemas: Set[str] = {"SearchForFacetValuesResponse", "SearchResponse"} def __init__(self, *args, **kwargs) -> None: @@ -50,14 +48,14 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[Union[SearchForFacetValuesResponse, SearchResponse]]: + ) -> Union[SearchForFacetValuesResponse, SearchResponse, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -98,9 +96,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -112,8 +110,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/search_rules_params.py b/algoliasearch/search/models/search_rules_params.py index 562725a88..478354f02 100644 --- a/algoliasearch/search/models/search_rules_params.py +++ b/algoliasearch/search/models/search_rules_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,22 +20,35 @@ from algoliasearch.search.models.anchoring import Anchoring +_ALIASES = { + "query": "query", + "anchoring": "anchoring", + "context": "context", + "page": "page", + "hits_per_page": "hitsPerPage", + "enabled": "enabled", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchRulesParams(BaseModel): """ Rules search parameters. """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query for rules. """ - anchoring: Optional[Anchoring] = Field(default=None, alias="anchoring") - context: Optional[str] = Field(default=None, alias="context") + anchoring: Optional[Anchoring] = None + context: Optional[str] = None """ Only return rules that match the context (exact match). 
""" - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Requested page of the API response. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Maximum number of hits per page. """ - enabled: Optional[bool] = Field(default=None, alias="enabled") + enabled: Optional[bool] = None """ If `true`, return only enabled rules. If `false`, return only inactive rules. By default, _all_ rules are returned. """ model_config = ConfigDict( @@ -43,6 +56,7 @@ class SearchRulesParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_rules_response.py b/algoliasearch/search/models/search_rules_response.py index debe4d6ed..3071c6225 100644 --- a/algoliasearch/search/models/search_rules_response.py +++ b/algoliasearch/search/models/search_rules_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,19 +20,30 @@ from algoliasearch.search.models.rule import Rule +_ALIASES = { + "hits": "hits", + "nb_hits": "nbHits", + "page": "page", + "nb_pages": "nbPages", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchRulesResponse(BaseModel): """ SearchRulesResponse """ - hits: List[Rule] = Field(alias="hits") + hits: List[Rule] """ Rules that matched the search criteria. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of rules that matched the search criteria. """ - page: int = Field(alias="page") + page: int """ Current page. """ - nb_pages: int = Field(alias="nbPages") + nb_pages: int """ Number of pages. 
""" model_config = ConfigDict( @@ -40,6 +51,7 @@ class SearchRulesResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_synonyms_params.py b/algoliasearch/search/models/search_synonyms_params.py index 85a5cc1cc..3f8b663ce 100644 --- a/algoliasearch/search/models/search_synonyms_params.py +++ b/algoliasearch/search/models/search_synonyms_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,29 @@ from algoliasearch.search.models.synonym_type import SynonymType +_ALIASES = { + "query": "query", + "type": "type", + "page": "page", + "hits_per_page": "hitsPerPage", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchSynonymsParams(BaseModel): """ SearchSynonymsParams """ - query: Optional[str] = Field(default=None, alias="query") + query: Optional[str] = None """ Search query. """ - type: Optional[SynonymType] = Field(default=None, alias="type") - page: Optional[int] = Field(default=None, alias="page") + type: Optional[SynonymType] = None + page: Optional[int] = None """ Page of search results to retrieve. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. 
""" model_config = ConfigDict( @@ -39,6 +50,7 @@ class SearchSynonymsParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_synonyms_response.py b/algoliasearch/search/models/search_synonyms_response.py index 873f82d72..0d86287bb 100644 --- a/algoliasearch/search/models/search_synonyms_response.py +++ b/algoliasearch/search/models/search_synonyms_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,15 +20,24 @@ from algoliasearch.search.models.synonym_hit import SynonymHit +_ALIASES = { + "hits": "hits", + "nb_hits": "nbHits", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchSynonymsResponse(BaseModel): """ SearchSynonymsResponse """ - hits: List[SynonymHit] = Field(alias="hits") + hits: List[SynonymHit] """ Matching synonyms. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). 
""" model_config = ConfigDict( @@ -36,6 +45,7 @@ class SearchSynonymsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, extra="allow", ) diff --git a/algoliasearch/search/models/search_user_ids_params.py b/algoliasearch/search/models/search_user_ids_params.py index 65e3ca1b0..a9ea03b6b 100644 --- a/algoliasearch/search/models/search_user_ids_params.py +++ b/algoliasearch/search/models/search_user_ids_params.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,18 +18,30 @@ from typing_extensions import Self +_ALIASES = { + "query": "query", + "cluster_name": "clusterName", + "page": "page", + "hits_per_page": "hitsPerPage", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SearchUserIdsParams(BaseModel): """ OK """ - query: str = Field(alias="query") + query: str """ Query to search. The search is a prefix search with [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/) enabled. An empty query will retrieve all users. """ - cluster_name: Optional[str] = Field(default=None, alias="clusterName") + cluster_name: Optional[str] = None """ Cluster name. """ - page: Optional[int] = Field(default=None, alias="page") + page: Optional[int] = None """ Page of search results to retrieve. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. 
""" model_config = ConfigDict( @@ -37,6 +49,7 @@ class SearchUserIdsParams(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/search_user_ids_response.py b/algoliasearch/search/models/search_user_ids_response.py index 0cba7d6f1..1ebcbc8f5 100644 --- a/algoliasearch/search/models/search_user_ids_response.py +++ b/algoliasearch/search/models/search_user_ids_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,33 @@ from algoliasearch.search.models.user_hit import UserHit +_ALIASES = { + "hits": "hits", + "nb_hits": "nbHits", + "page": "page", + "hits_per_page": "hitsPerPage", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SearchUserIdsResponse(BaseModel): """ userIDs data. """ - hits: List[UserHit] = Field(alias="hits") + hits: List[UserHit] """ User objects that match the query. """ - nb_hits: int = Field(alias="nbHits") + nb_hits: int """ Number of results (hits). """ - page: int = Field(alias="page") + page: int """ Page of search results to retrieve. """ - hits_per_page: int = Field(alias="hitsPerPage") + hits_per_page: int """ Maximum number of hits per page. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -42,6 +54,7 @@ class SearchUserIdsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/secured_api_key_restrictions.py b/algoliasearch/search/models/secured_api_key_restrictions.py index 756acabe3..3df6f68b6 100644 --- a/algoliasearch/search/models/secured_api_key_restrictions.py +++ b/algoliasearch/search/models/secured_api_key_restrictions.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,24 +20,35 @@ from algoliasearch.search.models.search_params_object import SearchParamsObject +_ALIASES = { + "search_params": "searchParams", + "filters": "filters", + "valid_until": "validUntil", + "restrict_indices": "restrictIndices", + "restrict_sources": "restrictSources", + "user_token": "userToken", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SecuredApiKeyRestrictions(BaseModel): """ SecuredApiKeyRestrictions """ - search_params: Optional[SearchParamsObject] = Field( - default=None, alias="searchParams" - ) - filters: Optional[str] = Field(default=None, alias="filters") + search_params: Optional[SearchParamsObject] = None + filters: Optional[str] = None """ Filters that apply to every search made with the secured API key. Extra filters added at search time will be combined with `AND`. For example, if you set `group:admin` as fixed filter on your generated API key, and add `groups:visitors` to the search query, the complete set of filters will be `group:admin AND groups:visitors`. 
""" - valid_until: Optional[int] = Field(default=None, alias="validUntil") + valid_until: Optional[int] = None """ Timestamp when the secured API key expires, measured in seconds since the Unix epoch. """ - restrict_indices: Optional[List[str]] = Field(default=None, alias="restrictIndices") + restrict_indices: Optional[List[str]] = None """ Index names or patterns that this API key can access. By default, an API key can access all indices in the same application. You can use leading and trailing wildcard characters (`*`): - `dev_*` matches all indices starting with \"dev_\". - `*_dev` matches all indices ending with \"_dev\". - `*_products_*` matches all indices containing \"_products_\". """ - restrict_sources: Optional[str] = Field(default=None, alias="restrictSources") + restrict_sources: Optional[str] = None """ IP network that are allowed to use this key. You can only add a single source, but you can provide a range of IP addresses. Use this to protect against API key leaking and reuse. """ - user_token: Optional[str] = Field(default=None, alias="userToken") + user_token: Optional[str] = None """ Pseudonymous user identifier to restrict usage of this API key to specific users. By default, rate limits are set based on IP addresses. This can be an issue if many users search from the same IP address. To avoid this, add a user token to each generated API key. 
""" model_config = ConfigDict( @@ -45,6 +56,7 @@ class SecuredApiKeyRestrictions(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/semantic_search.py b/algoliasearch/search/models/semantic_search.py index 31559fe7b..52efbbb4b 100644 --- a/algoliasearch/search/models/semantic_search.py +++ b/algoliasearch/search/models/semantic_search.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,12 +18,21 @@ from typing_extensions import Self +_ALIASES = { + "event_sources": "eventSources", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class SemanticSearch(BaseModel): """ Settings for the semantic search part of NeuralSearch. Only used when `mode` is `neuralSearch`. """ - event_sources: Optional[List[str]] = Field(default=None, alias="eventSources") + event_sources: Optional[List[str]] = None """ Indices from which to collect click and conversion events. If null, the current index and all its replicas are used. 
""" model_config = ConfigDict( @@ -31,6 +40,7 @@ class SemanticSearch(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/settings_response.py b/algoliasearch/search/models/settings_response.py index 3fb9e6ed3..c831b2687 100644 --- a/algoliasearch/search/models/settings_response.py +++ b/algoliasearch/search/models/settings_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -37,200 +37,193 @@ from algoliasearch.search.models.supported_language import SupportedLanguage from algoliasearch.search.models.typo_tolerance import TypoTolerance +_ALIASES = { + "attributes_for_faceting": "attributesForFaceting", + "replicas": "replicas", + "pagination_limited_to": "paginationLimitedTo", + "unretrievable_attributes": "unretrievableAttributes", + "disable_typo_tolerance_on_words": "disableTypoToleranceOnWords", + "attributes_to_transliterate": "attributesToTransliterate", + "camel_case_attributes": "camelCaseAttributes", + "decompounded_attributes": "decompoundedAttributes", + "index_languages": "indexLanguages", + "disable_prefix_on_attributes": "disablePrefixOnAttributes", + "allow_compression_of_integer_array": "allowCompressionOfIntegerArray", + "numeric_attributes_for_filtering": "numericAttributesForFiltering", + "separators_to_index": "separatorsToIndex", + "searchable_attributes": "searchableAttributes", + "user_data": "userData", + "custom_normalization": "customNormalization", + "attribute_for_distinct": "attributeForDistinct", + "attributes_to_retrieve": "attributesToRetrieve", + "ranking": "ranking", + "custom_ranking": "customRanking", + "relevancy_strictness": "relevancyStrictness", + "attributes_to_highlight": 
"attributesToHighlight", + "attributes_to_snippet": "attributesToSnippet", + "highlight_pre_tag": "highlightPreTag", + "highlight_post_tag": "highlightPostTag", + "snippet_ellipsis_text": "snippetEllipsisText", + "restrict_highlight_and_snippet_arrays": "restrictHighlightAndSnippetArrays", + "hits_per_page": "hitsPerPage", + "min_word_sizefor1_typo": "minWordSizefor1Typo", + "min_word_sizefor2_typos": "minWordSizefor2Typos", + "typo_tolerance": "typoTolerance", + "allow_typos_on_numeric_tokens": "allowTyposOnNumericTokens", + "disable_typo_tolerance_on_attributes": "disableTypoToleranceOnAttributes", + "ignore_plurals": "ignorePlurals", + "remove_stop_words": "removeStopWords", + "keep_diacritics_on_characters": "keepDiacriticsOnCharacters", + "query_languages": "queryLanguages", + "decompound_query": "decompoundQuery", + "enable_rules": "enableRules", + "enable_personalization": "enablePersonalization", + "query_type": "queryType", + "remove_words_if_no_results": "removeWordsIfNoResults", + "mode": "mode", + "semantic_search": "semanticSearch", + "advanced_syntax": "advancedSyntax", + "optional_words": "optionalWords", + "disable_exact_on_attributes": "disableExactOnAttributes", + "exact_on_single_word_query": "exactOnSingleWordQuery", + "alternatives_as_exact": "alternativesAsExact", + "advanced_syntax_features": "advancedSyntaxFeatures", + "distinct": "distinct", + "replace_synonyms_in_highlight": "replaceSynonymsInHighlight", + "min_proximity": "minProximity", + "response_fields": "responseFields", + "max_facet_hits": "maxFacetHits", + "max_values_per_facet": "maxValuesPerFacet", + "sort_facet_values_by": "sortFacetValuesBy", + "attribute_criteria_computed_by_min_proximity": "attributeCriteriaComputedByMinProximity", + "rendering_content": "renderingContent", + "enable_re_ranking": "enableReRanking", + "re_ranking_apply_filter": "reRankingApplyFilter", + "primary": "primary", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + 
class SettingsResponse(BaseModel): """ SettingsResponse """ - attributes_for_faceting: Optional[List[str]] = Field( - default=None, alias="attributesForFaceting" - ) + attributes_for_faceting: Optional[List[str]] = None """ Attributes used for [faceting](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/). Facets are attributes that let you categorize search results. They can be used for filtering search results. By default, no attribute is used for faceting. Attribute names are case-sensitive. **Modifiers** - `filterOnly(\"ATTRIBUTE\")`. Allows using this attribute as a filter, but doesn't evalue the facet values. - `searchable(\"ATTRIBUTE\")`. Allows searching for facet values. - `afterDistinct(\"ATTRIBUTE\")`. Evaluates the facet count _after_ deduplication with `distinct`. This ensures accurate facet counts. You can apply this modifier to searchable facets: `afterDistinct(searchable(ATTRIBUTE))`. """ - replicas: Optional[List[str]] = Field(default=None, alias="replicas") + replicas: Optional[List[str]] = None """ Creates [replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/). Replicas are copies of a primary index with the same records but different settings, synonyms, or rules. If you want to offer a different ranking or sorting of your search results, you'll use replica indices. All index operations on a primary index are automatically forwarded to its replicas. To add a replica index, you must provide the complete set of replicas to this parameter. If you omit a replica from this list, the replica turns into a regular, standalone index that will no longer by synced with the primary index. **Modifier** - `virtual(\"REPLICA\")`. Create a virtual replica, Virtual replicas don't increase the number of records and are optimized for [Relevant sorting](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/relevant-sort/). 
""" - pagination_limited_to: Optional[int] = Field( - default=None, alias="paginationLimitedTo" - ) + pagination_limited_to: Optional[int] = None """ Maximum number of search results that can be obtained through pagination. Higher pagination limits might slow down your search. For pagination limits above 1,000, the sorting of results beyond the 1,000th hit can't be guaranteed. """ - unretrievable_attributes: Optional[List[str]] = Field( - default=None, alias="unretrievableAttributes" - ) + unretrievable_attributes: Optional[List[str]] = None """ Attributes that can't be retrieved at query time. This can be useful if you want to use an attribute for ranking or to [restrict access](https://www.algolia.com/doc/guides/security/api-keys/how-to/user-restricted-access-to-data/), but don't want to include it in the search results. Attribute names are case-sensitive. """ - disable_typo_tolerance_on_words: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnWords" - ) + disable_typo_tolerance_on_words: Optional[List[str]] = None """ Words for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). This also turns off [word splitting and concatenation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/splitting-and-concatenation/) for the specified words. """ - attributes_to_transliterate: Optional[List[str]] = Field( - default=None, alias="attributesToTransliterate" - ) + attributes_to_transliterate: Optional[List[str]] = None """ Attributes, for which you want to support [Japanese transliteration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#japanese-transliteration-and-type-ahead). Transliteration supports searching in any of the Japanese writing systems. 
To support transliteration, you must set the indexing language to Japanese. Attribute names are case-sensitive. """ - camel_case_attributes: Optional[List[str]] = Field( - default=None, alias="camelCaseAttributes" - ) + camel_case_attributes: Optional[List[str]] = None """ Attributes for which to split [camel case](https://wikipedia.org/wiki/Camel_case) words. Attribute names are case-sensitive. """ - decompounded_attributes: Optional[object] = Field( - default=None, alias="decompoundedAttributes" - ) + decompounded_attributes: Optional[object] = None """ Searchable attributes to which Algolia should apply [word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/how-to/customize-segmentation/) (decompounding). Attribute names are case-sensitive. Compound words are formed by combining two or more individual words, and are particularly prevalent in Germanic languages—for example, \"firefighter\". With decompounding, the individual components are indexed separately. You can specify different lists for different languages. Decompounding is supported for these languages: Dutch (`nl`), German (`de`), Finnish (`fi`), Danish (`da`), Swedish (`sv`), and Norwegian (`no`). Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - index_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="indexLanguages" - ) + index_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific processing steps, such as word detection and dictionary settings. 
**You should always specify an indexing language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - disable_prefix_on_attributes: Optional[List[str]] = Field( - default=None, alias="disablePrefixOnAttributes" - ) + disable_prefix_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to turn off [prefix matching](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/#adjusting-prefix-search). Attribute names are case-sensitive. """ - allow_compression_of_integer_array: Optional[bool] = Field( - default=None, alias="allowCompressionOfIntegerArray" - ) + allow_compression_of_integer_array: Optional[bool] = None """ Whether arrays with exclusively non-negative integers should be compressed for better performance. If true, the compressed arrays may be reordered. """ - numeric_attributes_for_filtering: Optional[List[str]] = Field( - default=None, alias="numericAttributesForFiltering" - ) + numeric_attributes_for_filtering: Optional[List[str]] = None """ Numeric attributes that can be used as [numerical filters](https://www.algolia.com/doc/guides/managing-results/rules/detecting-intent/how-to/applying-a-custom-filter-for-a-specific-query/#numerical-filters). Attribute names are case-sensitive. By default, all numeric attributes are available as numerical filters. For faster indexing, reduce the number of numeric attributes. 
If you want to turn off filtering for all numeric attributes, specifiy an attribute that doesn't exist in your index, such as `NO_NUMERIC_FILTERING`. **Modifier** - `equalOnly(\"ATTRIBUTE\")`. Support only filtering based on equality comparisons `=` and `!=`. """ - separators_to_index: Optional[str] = Field(default=None, alias="separatorsToIndex") + separators_to_index: Optional[str] = None """ Controls which separators are indexed. Separators are all non-letter characters except spaces and currency characters, such as $€£¥. By default, separator characters aren't indexed. With `separatorsToIndex`, Algolia treats separator characters as separate words. For example, a search for `C#` would report two matches. """ - searchable_attributes: Optional[List[str]] = Field( - default=None, alias="searchableAttributes" - ) + searchable_attributes: Optional[List[str]] = None """ Attributes used for searching. Attribute names are case-sensitive. By default, all attributes are searchable and the [Attribute](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#attribute) ranking criterion is turned off. With a non-empty list, Algolia only returns results with matches in the selected attributes. In addition, the Attribute ranking criterion is turned on: matches in attributes that are higher in the list of `searchableAttributes` rank first. To make matches in two attributes rank equally, include them in a comma-separated string, such as `\"title,alternate_title\"`. Attributes with the same priority are always unordered. For more information, see [Searchable attributes](https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/how-to/setting-searchable-attributes/). **Modifier** - `unordered(\"ATTRIBUTE\")`. Ignore the position of a match within the attribute. Without modifier, matches at the beginning of an attribute rank higer than matches at the end. 
""" - user_data: Optional[object] = Field(default=None, alias="userData") + user_data: Optional[object] = None """ An object with custom data. You can store up to 32kB as custom data. """ - custom_normalization: Optional[Dict[str, Dict[str, str]]] = Field( - default=None, alias="customNormalization" - ) + custom_normalization: Optional[Dict[str, Dict[str, str]]] = None """ Characters and their normalized replacements. This overrides Algolia's default [normalization](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/). """ - attribute_for_distinct: Optional[str] = Field( - default=None, alias="attributeForDistinct" - ) + attribute_for_distinct: Optional[str] = None """ Attribute that should be used to establish groups of results. Attribute names are case-sensitive. All records with the same value for this attribute are considered a group. You can combine `attributeForDistinct` with the `distinct` search parameter to control how many items per group are included in the search results. If you want to use the same attribute also for faceting, use the `afterDistinct` modifier of the `attributesForFaceting` setting. This applies faceting _after_ deduplication, which will result in accurate facet counts. """ - attributes_to_retrieve: Optional[List[str]] = Field( - default=None, alias="attributesToRetrieve" - ) + attributes_to_retrieve: Optional[List[str]] = None """ Attributes to include in the API response. To reduce the size of your response, you can retrieve only some of the attributes. Attribute names are case-sensitive. - `*` retrieves all attributes, except attributes included in the `customRanking` and `unretrievableAttributes` settings. - To retrieve all attributes except a specific one, prefix the attribute with a dash and combine it with the `*`: `[\"*\", \"-ATTRIBUTE\"]`. - The `objectID` attribute is always included. 
""" - ranking: Optional[List[str]] = Field(default=None, alias="ranking") + ranking: Optional[List[str]] = None """ Determines the order in which Algolia returns your results. By default, each entry corresponds to a [ranking criteria](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/). The tie-breaking algorithm sequentially applies each criterion in the order they're specified. If you configure a replica index for [sorting by an attribute](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/how-to/sort-by-attribute/), you put the sorting attribute at the top of the list. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. Before you modify the default setting, you should test your changes in the dashboard, and by [A/B testing](https://www.algolia.com/doc/guides/ab-testing/what-is-ab-testing/). """ - custom_ranking: Optional[List[str]] = Field(default=None, alias="customRanking") + custom_ranking: Optional[List[str]] = None """ Attributes to use as [custom ranking](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/). Attribute names are case-sensitive. The custom ranking attributes decide which items are shown first if the other ranking criteria are equal. Records with missing values for your selected custom ranking attributes are always sorted last. Boolean attributes are sorted based on their alphabetical order. **Modifiers** - `asc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in ascending order. - `desc(\"ATTRIBUTE\")`. Sort the index by the values of an attribute, in descending order. 
If you use two or more custom ranking attributes, [reduce the precision](https://www.algolia.com/doc/guides/managing-results/must-do/custom-ranking/how-to/controlling-custom-ranking-metrics-precision/) of your first attributes, or the other attributes will never be applied. """ - relevancy_strictness: Optional[int] = Field( - default=None, alias="relevancyStrictness" - ) + relevancy_strictness: Optional[int] = None """ Relevancy threshold below which less relevant results aren't included in the results. You can only set `relevancyStrictness` on [virtual replica indices](https://www.algolia.com/doc/guides/managing-results/refine-results/sorting/in-depth/replicas/#what-are-virtual-replicas). Use this setting to strike a balance between the relevance and number of returned results. """ - attributes_to_highlight: Optional[List[str]] = Field( - default=None, alias="attributesToHighlight" - ) + attributes_to_highlight: Optional[List[str]] = None """ Attributes to highlight. By default, all searchable attributes are highlighted. Use `*` to highlight all attributes or use an empty array `[]` to turn off highlighting. Attribute names are case-sensitive. With highlighting, strings that match the search query are surrounded by HTML tags defined by `highlightPreTag` and `highlightPostTag`. You can use this to visually highlight matching parts of a search query in your UI. For more information, see [Highlighting and snippeting](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/highlighting-snippeting/js/). """ - attributes_to_snippet: Optional[List[str]] = Field( - default=None, alias="attributesToSnippet" - ) + attributes_to_snippet: Optional[List[str]] = None """ Attributes for which to enable snippets. Attribute names are case-sensitive. Snippets provide additional context to matched words. If you enable snippets, they include 10 words, including the matched word. The matched word will also be wrapped by HTML tags for highlighting. 
You can adjust the number of words with the following notation: `ATTRIBUTE:NUMBER`, where `NUMBER` is the number of words to be extracted. """ - highlight_pre_tag: Optional[str] = Field(default=None, alias="highlightPreTag") + highlight_pre_tag: Optional[str] = None """ HTML tag to insert before the highlighted parts in all highlighted results and snippets. """ - highlight_post_tag: Optional[str] = Field(default=None, alias="highlightPostTag") + highlight_post_tag: Optional[str] = None """ HTML tag to insert after the highlighted parts in all highlighted results and snippets. """ - snippet_ellipsis_text: Optional[str] = Field( - default=None, alias="snippetEllipsisText" - ) + snippet_ellipsis_text: Optional[str] = None """ String used as an ellipsis indicator when a snippet is truncated. """ - restrict_highlight_and_snippet_arrays: Optional[bool] = Field( - default=None, alias="restrictHighlightAndSnippetArrays" - ) + restrict_highlight_and_snippet_arrays: Optional[bool] = None """ Whether to restrict highlighting and snippeting to items that at least partially matched the search query. By default, all items are highlighted and snippeted. """ - hits_per_page: Optional[int] = Field(default=None, alias="hitsPerPage") + hits_per_page: Optional[int] = None """ Number of hits per page. """ - min_word_sizefor1_typo: Optional[int] = Field( - default=None, alias="minWordSizefor1Typo" - ) + min_word_sizefor1_typo: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [one typo](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). 
""" - min_word_sizefor2_typos: Optional[int] = Field( - default=None, alias="minWordSizefor2Typos" - ) + min_word_sizefor2_typos: Optional[int] = None """ Minimum number of characters a word in the search query must contain to accept matches with [two typos](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/in-depth/configuring-typo-tolerance/#configuring-word-length-for-typos). """ - typo_tolerance: Optional[TypoTolerance] = Field(default=None, alias="typoTolerance") - allow_typos_on_numeric_tokens: Optional[bool] = Field( - default=None, alias="allowTyposOnNumericTokens" - ) + typo_tolerance: Optional[TypoTolerance] = None + allow_typos_on_numeric_tokens: Optional[bool] = None """ Whether to allow typos on numbers in the search query. Turn off this setting to reduce the number of irrelevant matches when searching in large sets of similar numbers. """ - disable_typo_tolerance_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableTypoToleranceOnAttributes" - ) + disable_typo_tolerance_on_attributes: Optional[List[str]] = None """ Attributes for which you want to turn off [typo tolerance](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/). Attribute names are case-sensitive. Returning only exact matches can help when: - [Searching in hyphenated attributes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/typo-tolerance/how-to/how-to-search-in-hyphenated-attributes/). - Reducing the number of matches when you have too many. This can happen with attributes that are long blocks of text, such as product descriptions. Consider alternatives such as `disableTypoToleranceOnWords` or adding synonyms if your attributes have intentional unusual spellings that might look like typos. 
""" - ignore_plurals: Optional[IgnorePlurals] = Field(default=None, alias="ignorePlurals") - remove_stop_words: Optional[RemoveStopWords] = Field( - default=None, alias="removeStopWords" - ) - keep_diacritics_on_characters: Optional[str] = Field( - default=None, alias="keepDiacriticsOnCharacters" - ) + ignore_plurals: Optional[IgnorePlurals] = None + remove_stop_words: Optional[RemoveStopWords] = None + keep_diacritics_on_characters: Optional[str] = None """ Characters for which diacritics should be preserved. By default, Algolia removes diacritics from letters. For example, `é` becomes `e`. If this causes issues in your search, you can specify characters that should keep their diacritics. """ - query_languages: Optional[List[SupportedLanguage]] = Field( - default=None, alias="queryLanguages" - ) + query_languages: Optional[List[SupportedLanguage]] = None """ Languages for language-specific query processing steps such as plurals, stop-word removal, and word-detection dictionaries. This setting sets a default list of languages used by the `removeStopWords` and `ignorePlurals` settings. This setting also sets a dictionary for word detection in the logogram-based [CJK](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/normalization/#normalization-for-logogram-based-languages-cjk) languages. To support this, you must place the CJK language **first**. **You should always specify a query language.** If you don't specify an indexing language, the search engine uses all [supported languages](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/), or the languages you specified with the `ignorePlurals` or `removeStopWords` parameters. This can lead to unexpected search results. 
For more information, see [Language-specific configuration](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/). """ - decompound_query: Optional[bool] = Field(default=None, alias="decompoundQuery") + decompound_query: Optional[bool] = None """ Whether to split compound words in the query into their building blocks. For more information, see [Word segmentation](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/language-specific-configurations/#splitting-compound-words). Word segmentation is supported for these languages: German, Dutch, Finnish, Swedish, and Norwegian. Decompounding doesn't work for words with [non-spacing mark Unicode characters](https://www.charactercodes.net/category/non-spacing_mark). For example, `Gartenstühle` won't be decompounded if the `ü` consists of `u` (U+0075) and `◌̈` (U+0308). """ - enable_rules: Optional[bool] = Field(default=None, alias="enableRules") + enable_rules: Optional[bool] = None """ Whether to enable rules. """ - enable_personalization: Optional[bool] = Field( - default=None, alias="enablePersonalization" - ) + enable_personalization: Optional[bool] = None """ Whether to enable Personalization. 
""" - query_type: Optional[QueryType] = Field(default=None, alias="queryType") - remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = Field( - default=None, alias="removeWordsIfNoResults" - ) - mode: Optional[Mode] = Field(default=None, alias="mode") - semantic_search: Optional[SemanticSearch] = Field( - default=None, alias="semanticSearch" - ) - advanced_syntax: Optional[bool] = Field(default=None, alias="advancedSyntax") + query_type: Optional[QueryType] = None + remove_words_if_no_results: Optional[RemoveWordsIfNoResults] = None + mode: Optional[Mode] = None + semantic_search: Optional[SemanticSearch] = None + advanced_syntax: Optional[bool] = None """ Whether to support phrase matching and excluding words from search queries. Use the `advancedSyntaxFeatures` parameter to control which feature is supported. """ - optional_words: Optional[List[str]] = Field(default=None, alias="optionalWords") + optional_words: Optional[List[str]] = None """ Words that should be considered optional when found in the query. By default, records must match all words in the search query to be included in the search results. Adding optional words can help to increase the number of search results by running an additional search query that doesn't include the optional words. For example, if the search query is \"action video\" and \"video\" is an optional word, the search engine runs two queries. One for \"action video\" and one for \"action\". Records that match all words are ranked higher. For a search query with 4 or more words **and** all its words are optional, the number of matched words required for a record to be included in the search results increases for every 1,000 records: - If `optionalWords` has less than 10 words, the required number of matched words increases by 1: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 2 matched words. 
- If `optionalWords` has 10 or more words, the number of required matched words increases by the number of optional words dividied by 5 (rounded down). For example, with 18 optional words: results 1 to 1,000 require 1 matched word, results 1,001 to 2000 need 4 matched words. For more information, see [Optional words](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/empty-or-insufficient-results/#creating-a-list-of-optional-words). """ - disable_exact_on_attributes: Optional[List[str]] = Field( - default=None, alias="disableExactOnAttributes" - ) + disable_exact_on_attributes: Optional[List[str]] = None """ Searchable attributes for which you want to [turn off the Exact ranking criterion](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/override-search-engine-defaults/in-depth/adjust-exact-settings/#turn-off-exact-for-some-attributes). Attribute names are case-sensitive. This can be useful for attributes with long values, where the likelyhood of an exact match is high, such as product descriptions. Turning off the Exact ranking criterion for these attributes favors exact matching on other attributes. This reduces the impact of individual attributes with a lot of content on ranking. """ - exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = Field( - default=None, alias="exactOnSingleWordQuery" - ) - alternatives_as_exact: Optional[List[AlternativesAsExact]] = Field( - default=None, alias="alternativesAsExact" - ) + exact_on_single_word_query: Optional[ExactOnSingleWordQuery] = None + alternatives_as_exact: Optional[List[AlternativesAsExact]] = None """ Alternatives of query words that should be considered as exact matches by the Exact ranking criterion. - `ignorePlurals`. Plurals and similar declensions added by the `ignorePlurals` setting are considered exact matches. - `singleWordSynonym`. Single-word synonyms, such as \"NY/NYC\" are considered exact matches. - `multiWordsSynonym`. 
Multi-word synonyms, such as \"NY/New York\" are considered exact matches. """ - advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = Field( - default=None, alias="advancedSyntaxFeatures" - ) + advanced_syntax_features: Optional[List[AdvancedSyntaxFeatures]] = None """ Advanced search syntax features you want to support. - `exactPhrase`. Phrases in quotes must match exactly. For example, `sparkly blue \"iPhone case\"` only returns records with the exact string \"iPhone case\". - `excludeWords`. Query words prefixed with a `-` must not occur in a record. For example, `search -engine` matches records that contain \"search\" but not \"engine\". This setting only has an effect if `advancedSyntax` is true. """ - distinct: Optional[Distinct] = Field(default=None, alias="distinct") - replace_synonyms_in_highlight: Optional[bool] = Field( - default=None, alias="replaceSynonymsInHighlight" - ) + distinct: Optional[Distinct] = None + replace_synonyms_in_highlight: Optional[bool] = None """ Whether to replace a highlighted word with the matched synonym. By default, the original words are highlighted even if a synonym matches. For example, with `home` as a synonym for `house` and a search for `home`, records matching either \"home\" or \"house\" are included in the search results, and either \"home\" or \"house\" are highlighted. With `replaceSynonymsInHighlight` set to `true`, a search for `home` still matches the same records, but all occurences of \"house\" are replaced by \"home\" in the highlighted response. """ - min_proximity: Optional[int] = Field(default=None, alias="minProximity") + min_proximity: Optional[int] = None """ Minimum proximity score for two matching words. This adjusts the [Proximity ranking criterion](https://www.algolia.com/doc/guides/managing-results/relevance-overview/in-depth/ranking-criteria/#proximity) by equally scoring matches that are farther apart. 
For example, if `minProximity` is 2, neighboring matches and matches with one word between them would have the same score. """ - response_fields: Optional[List[str]] = Field(default=None, alias="responseFields") + response_fields: Optional[List[str]] = None """ Properties to include in the API response of `search` and `browse` requests. By default, all response properties are included. To reduce the response size, you can select, which attributes should be included. You can't exclude these properties: `message`, `warning`, `cursor`, `serverUsed`, `indexUsed`, `abTestVariantID`, `parsedQuery`, or any property triggered by the `getRankingInfo` parameter. Don't exclude properties that you might need in your search UI. """ - max_facet_hits: Optional[int] = Field(default=None, alias="maxFacetHits") + max_facet_hits: Optional[int] = None """ Maximum number of facet values to return when [searching for facet values](https://www.algolia.com/doc/guides/managing-results/refine-results/faceting/#search-for-facet-values). """ - max_values_per_facet: Optional[int] = Field(default=None, alias="maxValuesPerFacet") + max_values_per_facet: Optional[int] = None """ Maximum number of facet values to return for each facet. """ - sort_facet_values_by: Optional[str] = Field(default=None, alias="sortFacetValuesBy") + sort_facet_values_by: Optional[str] = None """ Order in which to retrieve facet values. - `count`. Facet values are retrieved by decreasing count. The count is the number of matching records containing this facet value. - `alpha`. Retrieve facet values alphabetically. This setting doesn't influence how facet values are displayed in your UI (see `renderingContent`). For more information, see [facet value display](https://www.algolia.com/doc/guides/building-search-ui/ui-and-ux-patterns/facet-display/js/). 
""" - attribute_criteria_computed_by_min_proximity: Optional[bool] = Field( - default=None, alias="attributeCriteriaComputedByMinProximity" - ) + attribute_criteria_computed_by_min_proximity: Optional[bool] = None """ Whether the best matching attribute should be determined by minimum proximity. This setting only affects ranking if the Attribute ranking criterion comes before Proximity in the `ranking` setting. If true, the best matching attribute is selected based on the minimum proximity of multiple matches. Otherwise, the best matching attribute is determined by the order in the `searchableAttributes` setting. """ - rendering_content: Optional[RenderingContent] = Field( - default=None, alias="renderingContent" - ) - enable_re_ranking: Optional[bool] = Field(default=None, alias="enableReRanking") + rendering_content: Optional[RenderingContent] = None + enable_re_ranking: Optional[bool] = None """ Whether this search will use [Dynamic Re-Ranking](https://www.algolia.com/doc/guides/algolia-ai/re-ranking/). This setting only has an effect if you activated Dynamic Re-Ranking for this index in the Algolia dashboard. """ - re_ranking_apply_filter: Optional[ReRankingApplyFilter] = Field( - default=None, alias="reRankingApplyFilter" - ) - primary: Optional[str] = Field(default=None, alias="primary") + re_ranking_apply_filter: Optional[ReRankingApplyFilter] = None + primary: Optional[str] = None """ Replica indices only: the name of the primary index for this replica. 
""" model_config = ConfigDict( @@ -238,6 +231,7 @@ class SettingsResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/snippet_result.py b/algoliasearch/search/models/snippet_result.py index e1a42415f..ea056ce36 100644 --- a/algoliasearch/search/models/snippet_result.py +++ b/algoliasearch/search/models/snippet_result.py @@ -32,8 +32,8 @@ class SnippetResult(BaseModel): """ Snippets that show the context around a matching search query. """ oneof_schema_3_validator: Optional[List[SnippetResult]] = Field(default=None) """ Snippets that show the context around a matching search query. """ - actual_instance: Optional[ - Union[Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption] + actual_instance: Union[ + Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption, None ] = None one_of_schemas: Set[str] = { "Dict[str, SnippetResult]", @@ -51,15 +51,15 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer def unwrap_actual_instance( self, - ) -> Optional[ - Union[Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption] + ) -> Union[ + Dict[str, SnippetResult], List[SnippetResult], SnippetResultOption, Self, None ]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
@@ -109,9 +109,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -130,8 +130,8 @@ def to_dict( return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/snippet_result_option.py b/algoliasearch/search/models/snippet_result_option.py index 2e35f7f79..f4b4fd119 100644 --- a/algoliasearch/search/models/snippet_result_option.py +++ b/algoliasearch/search/models/snippet_result_option.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,21 +20,31 @@ from algoliasearch.search.models.match_level import MatchLevel +_ALIASES = { + "value": "value", + "match_level": "matchLevel", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SnippetResultOption(BaseModel): """ Snippets that show the context around a matching search query. """ - value: str = Field(alias="value") + value: str """ Highlighted attribute value, including HTML tags. 
""" - match_level: MatchLevel = Field(alias="matchLevel") + match_level: MatchLevel model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/source.py b/algoliasearch/search/models/source.py index 1389545a9..3359bbc72 100644 --- a/algoliasearch/search/models/source.py +++ b/algoliasearch/search/models/source.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "source": "source", + "description": "description", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class Source(BaseModel): """ Source. """ - source: str = Field(alias="source") + source: str """ IP address range of the source. """ - description: Optional[str] = Field(default=None, alias="description") + description: Optional[str] = None """ Source description. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class Source(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/standard_entries.py b/algoliasearch/search/models/standard_entries.py index 88327d8bc..6caa148c8 100644 --- a/algoliasearch/search/models/standard_entries.py +++ b/algoliasearch/search/models/standard_entries.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "plurals": "plurals", + "stopwords": "stopwords", + "compounds": "compounds", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class StandardEntries(BaseModel): """ Key-value pairs of [supported language ISO codes](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/handling-natural-languages-nlp/in-depth/supported-languages/) and boolean values. """ - plurals: Optional[Dict[str, bool]] = Field(default=None, alias="plurals") + plurals: Optional[Dict[str, bool]] = None """ Key-value pair of a language ISO code and a boolean value. """ - stopwords: Optional[Dict[str, bool]] = Field(default=None, alias="stopwords") + stopwords: Optional[Dict[str, bool]] = None """ Key-value pair of a language ISO code and a boolean value. """ - compounds: Optional[Dict[str, bool]] = Field(default=None, alias="compounds") + compounds: Optional[Dict[str, bool]] = None """ Key-value pair of a language ISO code and a boolean value. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class StandardEntries(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/synonym_hit.py b/algoliasearch/search/models/synonym_hit.py index a53896094..1d564e055 100644 --- a/algoliasearch/search/models/synonym_hit.py +++ b/algoliasearch/search/models/synonym_hit.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,26 +20,41 @@ from algoliasearch.search.models.synonym_type import SynonymType +_ALIASES = { + "object_id": "objectID", + "type": "type", + "synonyms": "synonyms", + "input": "input", + "word": "word", + "corrections": "corrections", + "placeholder": "placeholder", + "replacements": "replacements", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class SynonymHit(BaseModel): """ Synonym object. """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique identifier of a synonym object. """ - type: SynonymType = Field(alias="type") - synonyms: Optional[List[str]] = Field(default=None, alias="synonyms") + type: SynonymType + synonyms: Optional[List[str]] = None """ Words or phrases considered equivalent. """ - input: Optional[str] = Field(default=None, alias="input") + input: Optional[str] = None """ Word or phrase to appear in query strings (for [`onewaysynonym`s](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/adding-synonyms/in-depth/one-way-synonyms/)). 
""" - word: Optional[str] = Field(default=None, alias="word") + word: Optional[str] = None """ Word or phrase to appear in query strings (for [`altcorrection1` and `altcorrection2`](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/adding-synonyms/in-depth/synonyms-alternative-corrections/)). """ - corrections: Optional[List[str]] = Field(default=None, alias="corrections") + corrections: Optional[List[str]] = None """ Words to be matched in records. """ - placeholder: Optional[str] = Field(default=None, alias="placeholder") + placeholder: Optional[str] = None """ [Placeholder token](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/adding-synonyms/in-depth/synonyms-placeholders/) to be put inside records. """ - replacements: Optional[List[str]] = Field(default=None, alias="replacements") + replacements: Optional[List[str]] = None """ Query words that will match the [placeholder token](https://www.algolia.com/doc/guides/managing-results/optimize-search-results/adding-synonyms/in-depth/synonyms-placeholders/). 
""" model_config = ConfigDict( @@ -47,6 +62,7 @@ class SynonymHit(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/tag_filters.py b/algoliasearch/search/models/tag_filters.py index eb88ff91e..7aed9674a 100644 --- a/algoliasearch/search/models/tag_filters.py +++ b/algoliasearch/search/models/tag_filters.py @@ -27,7 +27,7 @@ class TagFilters(BaseModel): oneof_schema_2_validator: Optional[str] = Field(default=None) - actual_instance: Optional[Union[List[TagFilters], str]] = None + actual_instance: Union[List[TagFilters], str, None] = None one_of_schemas: Set[str] = {"List[TagFilters]", "str"} def __init__(self, *args, **kwargs) -> None: @@ -40,12 +40,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[List[TagFilters], str]]: + def unwrap_actual_instance(self) -> Union[List[TagFilters], str, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -88,9 +88,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -100,8 +100,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], List[TagFilters], str]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/time_range.py b/algoliasearch/search/models/time_range.py index d0bfe155b..b472ed698 100644 --- a/algoliasearch/search/models/time_range.py +++ b/algoliasearch/search/models/time_range.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "var_from": "from", + "until": "until", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class TimeRange(BaseModel): """ TimeRange """ - var_from: int = Field(alias="from") + var_from: int """ When the rule should start to be active, in Unix epoch time. """ - until: int = Field(alias="until") + until: int """ When the rule should stop to be active, in Unix epoch time. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class TimeRange(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/typo_tolerance.py b/algoliasearch/search/models/typo_tolerance.py index 7de8f11bd..cedad4704 100644 --- a/algoliasearch/search/models/typo_tolerance.py +++ b/algoliasearch/search/models/typo_tolerance.py @@ -30,7 +30,7 @@ class TypoTolerance(BaseModel): """ Whether typo tolerance is active. If true, matches with typos are included in the search results and rank after exact matches. """ oneof_schema_2_validator: Optional[TypoToleranceEnum] = Field(default=None) - actual_instance: Optional[Union[TypoToleranceEnum, bool]] = None + actual_instance: Union[TypoToleranceEnum, bool, None] = None one_of_schemas: Set[str] = {"TypoToleranceEnum", "bool"} def __init__(self, *args, **kwargs) -> None: @@ -43,12 +43,12 @@ def __init__(self, *args, **kwargs) -> None: raise ValueError( "If a position argument is used, keyword arguments cannot be used." ) - super().__init__(actual_instance=args[0]) + super().__init__(actual_instance=args[0]) # pyright: ignore else: super().__init__(**kwargs) @model_serializer - def unwrap_actual_instance(self) -> Optional[Union[TypoToleranceEnum, bool]]: + def unwrap_actual_instance(self) -> Union[TypoToleranceEnum, bool, Self, None]: """ Unwraps the `actual_instance` when calling the `to_json` method. 
""" @@ -90,9 +90,9 @@ def to_json(self) -> str: return "null" if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json + self.actual_instance.to_json # pyright: ignore ): - return self.actual_instance.to_json() + return self.actual_instance.to_json() # pyright: ignore else: return dumps(self.actual_instance) @@ -102,8 +102,8 @@ def to_dict(self) -> Optional[Union[Dict[str, Any], TypoToleranceEnum, bool]]: return None if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict + self.actual_instance.to_dict # pyright: ignore ): - return self.actual_instance.to_dict() + return self.actual_instance.to_dict() # pyright: ignore else: - return self.actual_instance + return self.actual_instance # pyright: ignore diff --git a/algoliasearch/search/models/update_api_key_response.py b/algoliasearch/search/models/update_api_key_response.py index 14a3ade0f..a6ba370fb 100644 --- a/algoliasearch/search/models/update_api_key_response.py +++ b/algoliasearch/search/models/update_api_key_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "key": "key", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class UpdateApiKeyResponse(BaseModel): """ UpdateApiKeyResponse """ - key: str = Field(alias="key") + key: str """ API key. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class UpdateApiKeyResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/updated_at_response.py b/algoliasearch/search/models/updated_at_response.py index a9a66cb96..52a0046ce 100644 --- a/algoliasearch/search/models/updated_at_response.py +++ b/algoliasearch/search/models/updated_at_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,14 +18,24 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "updated_at": "updatedAt", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class UpdatedAtResponse(BaseModel): """ Response, taskID, and update timestamp. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. 
""" model_config = ConfigDict( @@ -33,6 +43,7 @@ class UpdatedAtResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/updated_at_with_object_id_response.py b/algoliasearch/search/models/updated_at_with_object_id_response.py index e376610e8..664b9d648 100644 --- a/algoliasearch/search/models/updated_at_with_object_id_response.py +++ b/algoliasearch/search/models/updated_at_with_object_id_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "task_id": "taskID", + "updated_at": "updatedAt", + "object_id": "objectID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class UpdatedAtWithObjectIdResponse(BaseModel): """ Response, taskID, unique object identifier, and an update timestamp. """ - task_id: Optional[int] = Field(default=None, alias="taskID") + task_id: Optional[int] = None """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. """ - updated_at: Optional[str] = Field(default=None, alias="updatedAt") + updated_at: Optional[str] = None """ Date and time when the object was updated, in RFC 3339 format. """ - object_id: Optional[str] = Field(default=None, alias="objectID") + object_id: Optional[str] = None """ Unique record identifier. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class UpdatedAtWithObjectIdResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/updated_rule_response.py b/algoliasearch/search/models/updated_rule_response.py index fa0a245c1..2e9368663 100644 --- a/algoliasearch/search/models/updated_rule_response.py +++ b/algoliasearch/search/models/updated_rule_response.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -18,16 +18,27 @@ from typing_extensions import Self +_ALIASES = { + "object_id": "objectID", + "updated_at": "updatedAt", + "task_id": "taskID", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + class UpdatedRuleResponse(BaseModel): """ UpdatedRuleResponse """ - object_id: str = Field(alias="objectID") + object_id: str """ Unique identifier of a rule object. """ - updated_at: str = Field(alias="updatedAt") + updated_at: str """ Date and time when the object was updated, in RFC 3339 format. """ - task_id: int = Field(alias="taskID") + task_id: int """ Unique identifier of a task. A successful API response means that a task was added to a queue. It might not run immediately. You can check the task's progress with the [`task` operation](#tag/Indices/operation/getTask) and this `taskID`. 
""" model_config = ConfigDict( @@ -35,6 +46,7 @@ class UpdatedRuleResponse(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/user_highlight_result.py b/algoliasearch/search/models/user_highlight_result.py index 531f9b87f..755814c3e 100644 --- a/algoliasearch/search/models/user_highlight_result.py +++ b/algoliasearch/search/models/user_highlight_result.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,20 +20,30 @@ from algoliasearch.search.models.highlight_result import HighlightResult +_ALIASES = { + "user_id": "userID", + "cluster_name": "clusterName", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class UserHighlightResult(BaseModel): """ UserHighlightResult """ - user_id: HighlightResult = Field(alias="userID") - cluster_name: HighlightResult = Field(alias="clusterName") + user_id: HighlightResult + cluster_name: HighlightResult model_config = ConfigDict( use_enum_values=True, populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/user_hit.py b/algoliasearch/search/models/user_hit.py index 4bd3a3f34..0c228c213 100644 --- a/algoliasearch/search/models/user_hit.py +++ b/algoliasearch/search/models/user_hit.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -21,23 +21,36 @@ from algoliasearch.search.models.user_highlight_result import UserHighlightResult 
+_ALIASES = { + "user_id": "userID", + "cluster_name": "clusterName", + "nb_records": "nbRecords", + "data_size": "dataSize", + "object_id": "objectID", + "highlight_result": "_highlightResult", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class UserHit(BaseModel): """ UserHit """ - user_id: str = Field(alias="userID") + user_id: str """ Unique identifier of the user who makes the search request. """ - cluster_name: str = Field(alias="clusterName") + cluster_name: str """ Cluster name. """ - nb_records: int = Field(alias="nbRecords") + nb_records: int """ Number of records in the cluster. """ - data_size: int = Field(alias="dataSize") + data_size: int """ Data size taken by all the users assigned to the cluster. """ - object_id: str = Field(alias="objectID") + object_id: str """ userID of the requested user. Same as userID. """ - highlight_result: UserHighlightResult = Field(alias="_highlightResult") + highlight_result: UserHighlightResult @field_validator("user_id") def user_id_validate_regular_expression(cls, value): @@ -53,6 +66,7 @@ def user_id_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/user_id.py b/algoliasearch/search/models/user_id.py index a72df793e..cbecaa24b 100644 --- a/algoliasearch/search/models/user_id.py +++ b/algoliasearch/search/models/user_id.py @@ -11,7 +11,7 @@ from sys import version_info from typing import Any, Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, ConfigDict, field_validator if version_info >= (3, 11): from typing import Self @@ -19,18 +19,30 @@ from typing_extensions import Self +_ALIASES = { + "user_id": "userID", + "cluster_name": "clusterName", + "nb_records": "nbRecords", + "data_size": "dataSize", +} + + +def _alias_generator(name: str) -> str: + 
return _ALIASES.get(name, name) + + class UserId(BaseModel): """ Unique user ID. """ - user_id: str = Field(alias="userID") + user_id: str """ Unique identifier of the user who makes the search request. """ - cluster_name: str = Field(alias="clusterName") + cluster_name: str """ Cluster to which the user is assigned. """ - nb_records: int = Field(alias="nbRecords") + nb_records: int """ Number of records belonging to the user. """ - data_size: int = Field(alias="dataSize") + data_size: int """ Data size used by the user. """ @field_validator("user_id") @@ -47,6 +59,7 @@ def user_id_validate_regular_expression(cls, value): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/value.py b/algoliasearch/search/models/value.py index b20208d21..f3cb581de 100644 --- a/algoliasearch/search/models/value.py +++ b/algoliasearch/search/models/value.py @@ -10,7 +10,7 @@ from sys import version_info from typing import Any, Dict, List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict if version_info >= (3, 11): from typing import Self @@ -20,18 +20,26 @@ from algoliasearch.search.models.sort_remaining_by import SortRemainingBy +_ALIASES = { + "order": "order", + "sort_remaining_by": "sortRemainingBy", + "hide": "hide", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + class Value(BaseModel): """ Value """ - order: Optional[List[str]] = Field(default=None, alias="order") + order: Optional[List[str]] = None """ Explicit order of facets or facet values. This setting lets you always show specific facets or facet values at the top of the list. 
""" - sort_remaining_by: Optional[SortRemainingBy] = Field( - default=None, alias="sortRemainingBy" - ) - hide: Optional[List[str]] = Field(default=None, alias="hide") + sort_remaining_by: Optional[SortRemainingBy] = None + hide: Optional[List[str]] = None """ Hide facet values. """ model_config = ConfigDict( @@ -39,6 +47,7 @@ class Value(BaseModel): populate_by_name=True, validate_assignment=True, protected_namespaces=(), + alias_generator=_alias_generator, ) def to_json(self) -> str: diff --git a/algoliasearch/search/models/widgets.py b/algoliasearch/search/models/widgets.py new file mode 100644 index 000000000..5baaaf592 --- /dev/null +++ b/algoliasearch/search/models/widgets.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" +Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT. +""" + +from __future__ import annotations + +from json import loads +from sys import version_info +from typing import Any, Dict, Optional + +from pydantic import BaseModel, ConfigDict + +if version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + + +from algoliasearch.search.models.banners import Banners + +_ALIASES = { + "banners": "banners", +} + + +def _alias_generator(name: str) -> str: + return _ALIASES.get(name, name) + + +class Widgets(BaseModel): + """ + widgets returned from any rules that are applied to the current search. 
+ """ + + banners: Optional[Banners] = None + + model_config = ConfigDict( + use_enum_values=True, + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + alias_generator=_alias_generator, + ) + + def to_json(self) -> str: + return self.model_dump_json(by_alias=True, exclude_unset=True) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Widgets from a JSON string""" + return cls.from_dict(loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + exclude_unset=True, + ) + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Widgets from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + obj["banners"] = ( + Banners.from_dict(obj["banners"]) + if obj.get("banners") is not None + else None + ) + + return cls.model_validate(obj) diff --git a/poetry.lock b/poetry.lock index d2adc9216..f6bbdd91b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -495,6 +495,17 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + [[package]] name = "pydantic" version = "2.9.1" @@ -619,6 +630,26 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pyright" +version = "1.1.383" +description = 
"Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.383-py3-none-any.whl", hash = "sha256:d864d1182a313f45aaf99e9bfc7d2668eeabc99b29a556b5344894fd73cb1959"}, + {file = "pyright-1.1.383.tar.gz", hash = "sha256:1df7f12407f3710c9c6df938d98ec53f70053e6c6bbf71ce7bcb038d42f10070"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -656,29 +687,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.6.4" +version = "0.6.8" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, - {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, - {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, - {file = 
"ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, - {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, - {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, - {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, - {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, + {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, + {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, + {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, + {file = 
"ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, + {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, + {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, + {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = 
"sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, + {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, ] [[package]] @@ -828,4 +859,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">= 3.8.1" -content-hash = "fb564ceafa860bc38c52945bf51504c4d7a6eb09f5a9cd65acd1aeafde93c22a" +content-hash = "35839d7442a42fd668b941cc16a5f0c2636889049af63060f869d5ffd250edd5" diff --git a/pyproject.toml b/pyproject.toml index a021ed966..ccd2318eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,8 @@ async-timeout = ">= 4.0.3" pydantic = ">= 2" [tool.poetry.group.dev.dependencies] -ruff = "== 0.6.4" +ruff = "== 0.6.8" +pyright = "== 1.1.383" [tool.ruff] line-length = 88 @@ -40,4 +41,4 @@ quote-style = "double" [build-system] requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" \ No newline at end of file +build-backend = "poetry.core.masonry.api"