From 245bd0894a61b3f309d79cdfa040dd344eabc29f Mon Sep 17 00:00:00 2001
From: Daniel Sanche
Date: Thu, 11 Jul 2024 14:54:16 -0700
Subject: [PATCH] removed conversion decorators

---
 .../bigtable/data/_async/_mutate_rows.py      | 11 ----
 .../cloud/bigtable/data/_async/_read_rows.py  |  8 ---
 google/cloud/bigtable/data/_async/client.py   | 43 -------
 .../bigtable/data/_async/mutations_batcher.py | 22 -------
 .../cloud/bigtable/data/_sync/cross_sync.py   | 11 ----
 tests/system/data/test_system_async.py        | 11 ----
 tests/unit/data/_async/test__mutate_rows.py   |  4 --
 tests/unit/data/_async/test__read_rows.py     | 11 ----
 tests/unit/data/_async/test_client.py         | 64 -------------------
 .../data/_async/test_mutations_batcher.py     | 15 -----
 .../data/_async/test_read_rows_acceptance.py  | 11 ----
 11 files changed, 211 deletions(-)

diff --git a/google/cloud/bigtable/data/_async/_mutate_rows.py b/google/cloud/bigtable/data/_async/_mutate_rows.py
index 6d4d2f2e8..2e7181695 100644
--- a/google/cloud/bigtable/data/_async/_mutate_rows.py
+++ b/google/cloud/bigtable/data/_async/_mutate_rows.py
@@ -62,9 +62,6 @@ class _EntryWithProto:  # noqa: F811
     proto: types_pb.MutateRowsRequest.Entry
 
 
-@CrossSync.export_sync(
-    path="google.cloud.bigtable.data._sync._mutate_rows._MutateRowsOperation",
-)
 class _MutateRowsOperationAsync:
     """
     MutateRowsOperation manages the logic of sending a set of row mutations,
@@ -84,12 +81,6 @@ class _MutateRowsOperationAsync:
         If not specified, the request will run until operation_timeout is reached.
     """
 
-    @CrossSync.convert(
-        replace_symbols={
-            "BigtableAsyncClient": "BigtableClient",
-            "TableAsync": "Table",
-        }
-    )
     def __init__(
         self,
         gapic_client: "BigtableAsyncClient",
@@ -141,7 +132,6 @@ def __init__(
         self.remaining_indices = list(range(len(self.mutations)))
         self.errors: dict[int, list[Exception]] = {}
 
-    @CrossSync.convert
    async def start(self):
         """
         Start the operation, and run until completion
@@ -174,7 +164,6 @@ async def start(self):
         if all_errors:
             raise MutationsExceptionGroup(all_errors, len(self.mutations))
 
-    @CrossSync.convert
     async def _run_attempt(self):
         """
         Run a single attempt of the mutate_rows rpc.
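The markers deleted above (CrossSync.export_sync on classes, CrossSync.convert on methods) appear to be build-time annotations rather than runtime wrappers: they carry metadata for the offline sync code generator and leave the decorated object untouched. A minimal sketch of that pattern, assuming no-op runtime semantics (the attribute name and dict layout below are illustrative, not CrossSync's actual internals):

    from __future__ import annotations
    from typing import Any, Callable, TypeVar

    T = TypeVar("T")

    def export_sync(*, path: str, **kwargs: Any) -> Callable[[T], T]:
        """Mark a class for sync generation at module path `path`; no-op at runtime."""
        def apply(target: T) -> T:
            # Metadata is read later by the offline AST pass; runtime behavior is unchanged.
            target._cross_sync_meta = {"path": path, **kwargs}  # type: ignore[attr-defined]
            return target
        return apply

    def convert(*, sync_name: str | None = None,
                replace_symbols: dict[str, str] | None = None) -> Callable[[T], T]:
        """Mark an async method for async-to-sync conversion; no-op at runtime."""
        def apply(target: T) -> T:
            target._cross_sync_meta = {  # type: ignore[attr-defined]
                "sync_name": sync_name,
                "replace_symbols": replace_symbols or {},
            }
            return target
        return apply

Under that assumption, deleting the markers (as this patch does throughout) cannot change async behavior; only the sync-generation pipeline is affected.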
diff --git a/google/cloud/bigtable/data/_async/_read_rows.py b/google/cloud/bigtable/data/_async/_read_rows.py
index 8c982427c..9285d5f6f 100644
--- a/google/cloud/bigtable/data/_async/_read_rows.py
+++ b/google/cloud/bigtable/data/_async/_read_rows.py
@@ -51,9 +51,6 @@ def __init__(self, chunk):
         self.chunk = chunk
 
 
-@CrossSync.export_sync(
-    path="google.cloud.bigtable.data._sync._read_rows._ReadRowsOperation",
-)
 class _ReadRowsOperationAsync:
     """
     ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream
@@ -85,7 +82,6 @@ class _ReadRowsOperationAsync:
         "_remaining_count",
     )
 
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     def __init__(
         self,
         query: ReadRowsQuery,
@@ -166,7 +162,6 @@ def _read_rows_attempt(self) -> CrossSync.Iterable[Row]:
         chunked_stream = self.chunk_stream(gapic_stream)
         return self.merge_rows(chunked_stream)
 
-    @CrossSync.convert
     async def chunk_stream(
         self, stream: CrossSync.Awaitable[CrossSync.Iterable[ReadRowsResponsePB]]
     ) -> CrossSync.Iterable[ReadRowsResponsePB.CellChunk]:
@@ -219,9 +214,6 @@ async def chunk_stream(
             current_key = None
 
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"__aiter__": "__iter__", "__anext__": "__next__"}
-    )
     async def merge_rows(
         chunks: CrossSync.Iterable[ReadRowsResponsePB.CellChunk] | None,
     ) -> CrossSync.Iterable[Row]:
diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py
index e7d84ebf1..1c52f83c2 100644
--- a/google/cloud/bigtable/data/_async/client.py
+++ b/google/cloud/bigtable/data/_async/client.py
@@ -110,17 +110,7 @@
 from google.cloud.bigtable.data._helpers import ShardedQuery
 
 
-@CrossSync.export_sync(
-    path="google.cloud.bigtable.data._sync.client.BigtableDataClient",
-)
 class BigtableDataClientAsync(ClientWithProject):
-    @CrossSync.convert(
-        replace_symbols={
-            "BigtableAsyncClient": "BigtableClient",
-            "PooledBigtableGrpcAsyncIOTransport": "PooledBigtableGrpcTransport",
-            "AsyncPooledChannel": "PooledChannel",
-        }
-    )
     def __init__(
         self,
         *,
@@ -266,7 +256,6 @@ def _start_background_channel_refresh(self) -> None:
                 lambda _: self._channel_refresh_tasks.remove(refresh_task) if refresh_task in self._channel_refresh_tasks else None
             )
 
-    @CrossSync.convert
     async def close(self, timeout: float | None = None):
         """
         Cancel all background tasks
@@ -279,7 +268,6 @@ async def close(self, timeout: float | None = None):
         self._executor.shutdown(wait=False)
         await CrossSync.wait(self._channel_refresh_tasks, timeout=timeout)
 
-    @CrossSync.convert
     async def _ping_and_warm_instances(
         self, channel: Channel, instance_key: _helpers._WarmedInstanceKey | None = None
     ) -> list[BaseException | None]:
@@ -321,7 +309,6 @@ async def _ping_and_warm_instances(
             )
         return [r or None for r in result_list]
 
-    @CrossSync.convert
     async def _manage_channel(
         self,
         channel_idx: int,
@@ -378,7 +365,6 @@ async def _manage_channel(
             next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
             next_sleep = next_refresh - (time.monotonic() - start_timestamp)
 
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     async def _register_instance(self, instance_id: str, owner: TableAsync) -> None:
         """
         Registers an instance with the client, and warms the channel pool
@@ -409,7 +395,6 @@ async def _register_instance(self, instance_id: str, owner: TableAsync) -> None:
             # refresh tasks aren't active. start them as background tasks
            self._start_background_channel_refresh()
 
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     async def _remove_instance_registration(
         self, instance_id: str, owner: TableAsync
     ) -> bool:
@@ -440,7 +425,6 @@ async def _remove_instance_registration(
         except KeyError:
             return False
 
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAsync:
         """
         Returns a table instance for making data API requests. All arguments are passed
@@ -482,18 +466,15 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs
         """
         return TableAsync(self, instance_id, table_id, *args, **kwargs)
 
-    @CrossSync.convert(sync_name="__enter__")
     async def __aenter__(self):
         self._start_background_channel_refresh()
         return self
 
-    @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"})
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         await self.close()
         await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb)
 
 
-@CrossSync.export_sync(path="google.cloud.bigtable.data._sync.client.Table")
 class TableAsync:
     """
     Main Data API surface
@@ -502,9 +483,6 @@ class TableAsync:
         each call
     """
 
-    @CrossSync.convert(
-        replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"}
-    )
     def __init__(
         self,
         client: BigtableDataClientAsync,
@@ -625,12 +603,6 @@ def __init__(
                 f"{self.__class__.__name__} must be created within an async event loop context."
             ) from e
 
-    @CrossSync.convert(
-        replace_symbols={
-            "AsyncIterable": "Iterable",
-            "_ReadRowsOperationAsync": "_ReadRowsOperation",
-        }
-    )
     async def read_rows_stream(
         self,
         query: ReadRowsQuery,
@@ -681,7 +653,6 @@ async def read_rows_stream(
         )
         return row_merger.start_operation()
 
-    @CrossSync.convert
     async def read_rows(
         self,
         query: ReadRowsQuery,
@@ -729,7 +700,6 @@ async def read_rows(
         )
         return [row async for row in row_generator]
 
-    @CrossSync.convert
     async def read_row(
         self,
         row_key: str | bytes,
@@ -779,7 +749,6 @@ async def read_row(
             return None
         return results[0]
 
-    @CrossSync.convert
     async def read_rows_sharded(
         self,
         sharded_query: ShardedQuery,
@@ -879,7 +848,6 @@ async def read_rows_with_semaphore(query):
         )
         return results_list
 
-    @CrossSync.convert
     async def row_exists(
         self,
         row_key: str | bytes,
@@ -928,7 +896,6 @@ async def row_exists(
         )
         return len(results) > 0
 
-    @CrossSync.convert
     async def sample_row_keys(
         self,
         *,
@@ -1001,7 +968,6 @@ async def execute_rpc():
             exception_factory=_helpers._retry_exception_factory,
         )
 
-    @CrossSync.convert(replace_symbols={"MutationsBatcherAsync": "MutationsBatcher"})
     def mutations_batcher(
         self,
         *,
@@ -1051,7 +1017,6 @@ def mutations_batcher(
             batch_retryable_errors=batch_retryable_errors,
         )
 
-    @CrossSync.convert
     async def mutate_row(
         self,
         row_key: str | bytes,
@@ -1130,9 +1095,6 @@ async def mutate_row(
             exception_factory=_helpers._retry_exception_factory,
         )
 
-    @CrossSync.convert(
-        replace_symbols={"_MutateRowsOperationAsync": "_MutateRowsOperation"}
-    )
     async def bulk_mutate_rows(
         self,
         mutation_entries: list[RowMutationEntry],
@@ -1188,7 +1150,6 @@ async def bulk_mutate_rows(
         )
         await operation.start()
 
-    @CrossSync.convert
     async def check_and_mutate_row(
         self,
         row_key: str | bytes,
@@ -1255,7 +1216,6 @@ async def check_and_mutate_row(
         )
         return result.predicate_matched
 
-    @CrossSync.convert
     async def read_modify_write_row(
         self,
         row_key: str | bytes,
@@ -1306,7 +1266,6 @@ async def read_modify_write_row(
         # construct Row from result
         return Row._from_pb(result.row)
 
-    @CrossSync.convert
     async def close(self):
         """
         Called to close the Table instance and release any resources held by it.
@@ -1315,7 +1274,6 @@ async def close(self):
             self._register_instance_future.cancel()
         await self.client._remove_instance_registration(self.instance_id, self)
 
-    @CrossSync.convert(sync_name="__enter__")
     async def __aenter__(self):
         """
         Implement async context manager protocol
@@ -1327,7 +1285,6 @@ async def __aenter__(self):
             await self._register_instance_future
         return self
 
-    @CrossSync.convert(sync_name="__exit__")
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """
         Implement async context manager protocol
diff --git a/google/cloud/bigtable/data/_async/mutations_batcher.py b/google/cloud/bigtable/data/_async/mutations_batcher.py
index b9a6a3339..07eac0e26 100644
--- a/google/cloud/bigtable/data/_async/mutations_batcher.py
+++ b/google/cloud/bigtable/data/_async/mutations_batcher.py
@@ -51,9 +51,6 @@
     from google.cloud.bigtable.data._sync.client import Table  # noqa: F401
 
 
-@CrossSync.export_sync(
-    path="google.cloud.bigtable.data._sync.mutations_batcher._FlowControl"
-)
 class _FlowControlAsync:
     """
     Manages flow control for batched mutations. Mutations are registered against
@@ -110,7 +107,6 @@ def _has_capacity(self, additional_count: int, additional_size: int) -> bool:
         new_count = self._in_flight_mutation_count + additional_count
         return new_size <= acceptable_size and new_count <= acceptable_count
 
-    @CrossSync.convert
     async def remove_from_flow(
         self, mutations: RowMutationEntry | list[RowMutationEntry]
     ) -> None:
@@ -132,7 +128,6 @@ async def remove_from_flow(
         async with self._capacity_condition:
             self._capacity_condition.notify_all()
 
-    @CrossSync.convert
     async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]):
         """
         Generator function that registers mutations with flow control. As mutations
@@ -182,10 +177,6 @@ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]
                 yield mutations[start_idx:end_idx]
 
 
-@CrossSync.export_sync(
-    path="google.cloud.bigtable.data._sync.mutations_batcher.MutationsBatcher",
-    mypy_ignore=["unreachable"],
-)
 class MutationsBatcherAsync:
     """
     Allows users to send batches using context manager API:
@@ -217,9 +208,6 @@ class MutationsBatcherAsync:
             Defaults to the Table's default_mutate_rows_retryable_errors.
     """
 
-    @CrossSync.convert(
-        replace_symbols={"TableAsync": "Table", "_FlowControlAsync": "_FlowControl"}
-    )
     def __init__(
         self,
         table: TableAsync,
@@ -275,7 +263,6 @@ def __init__(
         # clean up on program exit
         atexit.register(self._on_exit)
 
-    @CrossSync.convert
     async def _timer_routine(self, interval: float | None) -> None:
         """
         Set up a background task to flush the batcher every interval seconds
@@ -296,7 +283,6 @@ async def _timer_routine(self, interval: float | None) -> None:
         if not self._closed.is_set() and self._staged_entries:
             self._schedule_flush()
 
-    @CrossSync.convert
     async def append(self, mutation_entry: RowMutationEntry):
         """
         Add a new set of mutations to the internal queue
@@ -346,7 +332,6 @@ def _schedule_flush(self) -> CrossSync.Future[None] | None:
             return new_task
         return None
 
-    @CrossSync.convert
     async def _flush_internal(self, new_entries: list[RowMutationEntry]):
         """
         Flushes a set of mutations to the server, and updates internal state
@@ -367,9 +352,6 @@ async def _flush_internal(self, new_entries: list[RowMutationEntry]):
         self._entries_processed_since_last_raise += len(new_entries)
         self._add_exceptions(found_exceptions)
 
-    @CrossSync.convert(
-        replace_symbols={"_MutateRowsOperationAsync": "_MutateRowsOperation"}
-    )
     async def _execute_mutate_rows(
         self, batch: list[RowMutationEntry]
     ) -> list[FailedMutationEntryError]:
@@ -450,12 +432,10 @@ def _raise_exceptions(self):
                 entry_count=entry_count,
             )
 
-    @CrossSync.convert(sync_name="__enter__")
     async def __aenter__(self):
         """Allow use of context manager API"""
         return self
 
-    @CrossSync.convert(sync_name="__exit__")
     async def __aexit__(self, exc_type, exc, tb):
         """
         Allow use of context manager API.
@@ -472,7 +452,6 @@ def closed(self) -> bool:
         """
         return self._closed.is_set()
 
-    @CrossSync.convert
     async def close(self):
         """
         Flush queue and clean up resources
@@ -500,7 +479,6 @@ def _on_exit(self):
             )
 
     @staticmethod
-    @CrossSync.convert
     async def _wait_for_batch_results(
         *tasks: CrossSync.Future[list[FailedMutationEntryError]]
         | CrossSync.Future[None],
diff --git a/google/cloud/bigtable/data/_sync/cross_sync.py b/google/cloud/bigtable/data/_sync/cross_sync.py
index dd87a63b5..e3f1169fa 100644
--- a/google/cloud/bigtable/data/_sync/cross_sync.py
+++ b/google/cloud/bigtable/data/_sync/cross_sync.py
@@ -159,17 +159,6 @@ class CrossSync(metaclass=_DecoratorMeta):
     Generator: TypeAlias = AsyncGenerator
 
     _decorators: list[AstDecorator] = [
-        AstDecorator("export_sync",  # decorate classes to convert
-            required_keywords=["path"],  # output path for generated sync class
-            replace_symbols={},  # replace specific symbols across entire class
-            mypy_ignore=(),  # set of mypy error codes to ignore in output file
-            include_file_imports=True  # when True, import statements from top of file will be included in output file
-        ),
-        AstDecorator("convert",  # decorate methods to convert from async to sync
-            sync_name=None,  # use a new name for the sync version of the method
-            replace_symbols={},  # replace specific symbols within the function
-        ),
-        AstDecorator("drop_method"),  # decorate methods to drop in sync version of class
         AstDecorator("pytest", inner_decorator=pytest_mark_asyncio),  # decorate test methods to run with pytest-asyncio
         AstDecorator("pytest_fixture",  # decorate test methods to run with pytest fixture
             inner_decorator=pytest_asyncio_fixture,
diff --git a/tests/system/data/test_system_async.py b/tests/system/data/test_system_async.py
index 32ff5f49c..ed3435e39 100644
--- a/tests/system/data/test_system_async.py
+++ b/tests/system/data/test_system_async.py
@@ -34,7 +34,6 @@
 TEST_FAMILY = "test-family"
 TEST_FAMILY_2 = "test-family-2"
 
-@CrossSync.export_sync(path="tests.system.data.test_system.TempRowBuilder")
 class TempRowBuilderAsync:
     """
     Used to add rows to a table for testing purposes.
@@ -44,7 +43,6 @@ def __init__(self, table):
         self.rows = []
         self.table = table
 
-    @CrossSync.convert
     async def add_row(
         self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value"
     ):
@@ -68,7 +66,6 @@ async def add_row(
         await self.table.client._gapic_client.mutate_row(request)
         self.rows.append(row_key)
 
-    @CrossSync.convert
     async def delete_rows(self):
         if self.rows:
             request = {
@@ -81,17 +78,14 @@ async def delete_rows(self):
             await self.table.client._gapic_client.mutate_rows(request)
 
 
-@CrossSync.export_sync(path="tests.system.data.test_system.TestSystem")
 class TestSystemAsync:
-    @CrossSync.convert(replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"})
     @CrossSync.pytest_fixture(scope="session")
     async def client(self):
         project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
         async with BigtableDataClientAsync(project=project, pool_size=4) as client:
             yield client
 
-    @CrossSync.convert
     @CrossSync.pytest_fixture(scope="session")
     async def table(self, client, table_id, instance_id):
         async with client.get_table(
@@ -139,7 +133,6 @@ def cluster_config(self, project_id):
         }
         return cluster
 
-    @CrossSync.convert
     @pytest.mark.usefixtures("table")
     async def _retrieve_cell_value(self, table, row_key):
         """
@@ -153,7 +146,6 @@ async def _retrieve_cell_value(self, table, row_key):
         cell = row.cells[0]
         return cell.value
 
-    @CrossSync.convert
     async def _create_row_and_mutation(
         self, table, temp_rows, *, start_value=b"start", new_value=b"new_value"
     ):
@@ -174,7 +166,6 @@ async def _create_row_and_mutation(
         mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value)
         return row_key, mutation
 
-    @CrossSync.convert(replace_symbols={"TempRowBuilderAsync": "TempRowBuilder"})
     @CrossSync.pytest_fixture(scope="function")
     async def temp_rows(self, table):
         builder = TempRowBuilderAsync(table)
@@ -665,7 +656,6 @@ async def test_check_and_mutate_empty_request(self, client, table):
         assert "No mutations provided" in str(e.value)
 
     @pytest.mark.usefixtures("table")
-    @CrossSync.convert(replace_symbols={"__anext__": "__next__"})
     @CrossSync.Retry(
         predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
     )
@@ -854,7 +844,6 @@ async def test_read_rows_with_filter(self, table, temp_rows):
         assert row[0].labels == [expected_label]
 
     @pytest.mark.usefixtures("table")
-    @CrossSync.convert(replace_symbols={"__anext__": "__next__", "aclose": "close"})
     @CrossSync.pytest
     async def test_read_rows_stream_close(self, table, temp_rows):
         """
diff --git a/tests/unit/data/_async/test__mutate_rows.py b/tests/unit/data/_async/test__mutate_rows.py
index 292cbd692..b7016be81 100644
--- a/tests/unit/data/_async/test__mutate_rows.py
+++ b/tests/unit/data/_async/test__mutate_rows.py
@@ -28,9 +28,6 @@
 import mock  # type: ignore
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test__mutate_rows.TestMutateRowsOperation",
-)
 class TestMutateRowsOperation:
     def _target_class(self):
         if CrossSync.is_async:
@@ -62,7 +59,6 @@ def _make_mutation(self, count=1, size=1):
         mutation.mutations = [mock.Mock()] * count
         return mutation
 
-    @CrossSync.convert
     async def _mock_stream(self, mutation_list, error_dict):
         for idx, entry in enumerate(mutation_list):
             code = error_dict.get(idx, 0)
diff --git a/tests/unit/data/_async/test__read_rows.py b/tests/unit/data/_async/test__read_rows.py
index 076e86788..e04436ee1 100644
--- a/tests/unit/data/_async/test__read_rows.py
+++ b/tests/unit/data/_async/test__read_rows.py
@@ -34,9 +34,6 @@
 TEST_LABELS = ["label1", "label2"]
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test__read_rows.TestReadRowsOperation",
-)
 class TestReadRowsOperationAsync:
     """
     Tests helper functions in the ReadRowsOperation class
@@ -45,9 +42,6 @@ class TestReadRowsOperationAsync:
     """
 
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"_ReadRowsOperationAsync": "_ReadRowsOperation"}
-    )
     def _get_target_class():
         return _ReadRowsOperationAsync
 
@@ -332,10 +326,6 @@ async def mock_stream():
         assert "emit count exceeds row limit" in str(e.value)
 
     @CrossSync.pytest
-    @CrossSync.convert(
-        sync_name="test_close",
-        replace_symbols={"aclose": "close", "__anext__": "__next__"},
-    )
     async def test_aclose(self):
         """
         should be able to close a stream safely with aclose.
@@ -365,7 +355,6 @@ async def mock_stream():
             await wrapped_gen.__anext__()
 
     @CrossSync.pytest
-    @CrossSync.convert(replace_symbols={"__anext__": "__next__"})
     async def test_retryable_ignore_repeated_rows(self):
         """
         Duplicate rows should cause an invalid chunk error
diff --git a/tests/unit/data/_async/test_client.py b/tests/unit/data/_async/test_client.py
index 0f5775fac..5370f35d3 100644
--- a/tests/unit/data/_async/test_client.py
+++ b/tests/unit/data/_async/test_client.py
@@ -76,21 +76,8 @@
 )
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test_client.TestBigtableDataClient",
-    replace_symbols={
-        "TestTableAsync": "TestTable",
-        "PooledBigtableGrpcAsyncIOTransport": "PooledBigtableGrpcTransport",
-        "grpc_helpers_async": "grpc_helpers",
-        "PooledChannelAsync": "PooledChannel",
-        "BigtableAsyncClient": "BigtableClient",
-    },
-)
 class TestBigtableDataClientAsync:
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"}
-    )
     def _get_target_class():
         return BigtableDataClientAsync
 
@@ -300,7 +287,6 @@ async def test_channel_pool_replace(self):
             assert client.transport._grpc_channel._pool[i] != start_pool[i]
         await client.close()
 
-    @CrossSync.drop_method
     @pytest.mark.filterwarnings("ignore::RuntimeWarning")
     def test__start_background_channel_refresh_sync(self):
         # should raise RuntimeError if called in a sync context
@@ -344,7 +330,6 @@ async def test__start_background_channel_refresh(self, pool_size):
             ping_and_warm.assert_any_call(channel)
         await client.close()
 
-    @CrossSync.drop_method
     @CrossSync.pytest
     @pytest.mark.skipif(
         sys.version_info < (3, 8), reason="Task.name requires python3.8 or higher"
    )
@@ -1125,7 +1110,6 @@ async def test_context_manager(self):
         # actually close the client
         await true_close
 
-    @CrossSync.drop_method
     def test_client_ctor_sync(self):
         # initializing client in a sync context should raise RuntimeError
 
@@ -1141,16 +1125,11 @@
         assert client._channel_refresh_tasks == []
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestTable")
 class TestTableAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
     @staticmethod
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     def _get_target_class():
         return TableAsync
 
@@ -1272,7 +1251,6 @@ async def test_table_ctor_invalid_timeout_values(self):
         assert "operation_timeout must be greater than 0" in str(e.value)
         await client.close()
 
-    @CrossSync.drop_method
     def test_table_ctor_sync(self):
         # initializing client in a sync context should raise RuntimeError
         client = mock.Mock()
 
@@ -1422,7 +1400,6 @@ async def test_customizable_retryable_errors(
     )
     @pytest.mark.parametrize("include_app_profile", [True, False])
     @CrossSync.pytest
-    @CrossSync.convert(replace_symbols={"BigtableAsyncClient": "BigtableClient"})
     async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn):
         """check that all requests attach proper metadata headers"""
         profile = "profile" if include_app_profile else None
@@ -1455,26 +1432,18 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_
             assert "app_profile_id=" not in goog_metadata
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestReadRows")
 class TestReadRowsAsync:
     """
     Tests for table.read_rows and related methods.
     """
 
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"_ReadRowsOperationAsync": "_ReadRowsOperation"}
-    )
     def _get_operation_class():
         return _ReadRowsOperationAsync
 
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
-    @CrossSync.convert(replace_symbols={"TestTableAsync": "TestTable"})
     def _make_table(self, *args, **kwargs):
         client_mock = mock.Mock()
         client_mock._register_instance.side_effect = (
@@ -1522,7 +1491,6 @@ def _make_chunk(*args, **kwargs):
         return ReadRowsResponse.CellChunk(*args, **kwargs)
 
     @staticmethod
-    @CrossSync.convert
     async def _make_gapic_stream(
         chunk_list: list[ReadRowsResponse.CellChunk | Exception],
         sleep_time=0,
@@ -1561,7 +1529,6 @@ def cancel(self):
 
         return mock_stream(chunk_list, sleep_time)
 
-    @CrossSync.convert
     async def execute_fn(self, table, *args, **kwargs):
         return await table.read_rows(*args, **kwargs)
 
@@ -1973,11 +1940,7 @@ async def test_row_exists(self, return_value, expected_result):
             assert query.filter._to_dict() == expected_filter
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestReadRowsSharded")
 class TestReadRowsShardedAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
@@ -1990,7 +1953,6 @@ async def test_read_rows_sharded_empty_query(self):
         assert "empty sharded_query" in str(exc.value)
 
     @CrossSync.pytest
-    @CrossSync.convert(replace_symbols={"TestReadRowsAsync": "TestReadRows"})
     async def test_read_rows_sharded_multiple_queries(self):
         """
         Test with multiple queries. Should return results from both
@@ -2198,15 +2160,10 @@ async def mock_call(*args, **kwargs):
             )
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestSampleRowKeys")
 class TestSampleRowKeysAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
-    @CrossSync.convert
     async def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]):
         from google.cloud.bigtable_v2.types import SampleRowKeysResponse
 
@@ -2354,13 +2311,7 @@ async def test_sample_row_keys_non_retryable_errors(self, non_retryable_exceptio
             await table.sample_row_keys()
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test_client.TestMutateRow",
-)
 class TestMutateRowAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
@@ -2535,17 +2486,10 @@ async def test_mutate_row_no_mutations(self, mutations):
         assert e.value.args[0] == "No mutations provided"
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test_client.TestBulkMutateRows",
-)
 class TestBulkMutateRowsAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
-    @CrossSync.convert
     async def _mock_response(self, response_list):
         from google.cloud.bigtable_v2.types import MutateRowsResponse
         from google.rpc import status_pb2
@@ -2921,11 +2865,7 @@ async def test_bulk_mutate_error_recovery(self):
             await table.bulk_mutate_rows(entries, operation_timeout=1000)
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestCheckAndMutateRow")
 class TestCheckAndMutateRowAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
@@ -3076,11 +3016,7 @@ async def test_check_and_mutate_mutations_parsing(self):
         )
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_client.TestReadModifyWriteRow")
 class TestReadModifyWriteRowAsync:
-    @CrossSync.convert(
-        replace_symbols={"TestBigtableDataClientAsync": "TestBigtableDataClient"}
-    )
     def _make_client(self, *args, **kwargs):
         return TestBigtableDataClientAsync._make_client(*args, **kwargs)
 
diff --git a/tests/unit/data/_async/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py
index 85adae2d2..6386adc7f 100644
--- a/tests/unit/data/_async/test_mutations_batcher.py
+++ b/tests/unit/data/_async/test_mutations_batcher.py
@@ -43,10 +43,8 @@
 import mock  # type: ignore
 
 
-@CrossSync.export_sync(path="tests.unit.data._sync.test_mutations_batcher.Test_FlowControl")
 class Test_FlowControl:
     @staticmethod
-    @CrossSync.convert(replace_symbols={"_FlowControlAsync": "_FlowControl"})
     def _target_class():
         return _FlowControlAsync
 
@@ -321,11 +319,7 @@ async def test_add_to_flow_oversize(self):
         assert len(count_results) == 1
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test_mutations_batcher.TestMutationsBatcher"
-)
 class TestMutationsBatcherAsync:
-    @CrossSync.convert(replace_symbols={"MutationsBatcherAsync": "MutationsBatcher"})
     def _get_target_class(self):
         return MutationsBatcherAsync
 
@@ -487,7 +481,6 @@ async def test_ctor_invalid_values(self):
             self._make_one(batch_attempt_timeout=-1)
         assert "attempt_timeout must be greater than 0" in str(e.value)
 
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     def test_default_argument_consistency(self):
         """
         We supply default arguments in MutationsBatcherAsync.__init__, and in
@@ -908,7 +901,6 @@ async def gen(x):
         instance._oldest_exceptions.clear()
         instance._newest_exceptions.clear()
 
-    @CrossSync.convert
     async def _mock_gapic_return(self, num=5):
         from google.cloud.bigtable_v2.types import MutateRowsResponse
         from google.rpc import status_pb2
@@ -1024,18 +1016,12 @@ async def test__raise_exceptions(self):
             instance._raise_exceptions()
 
     @CrossSync.pytest
-    @CrossSync.convert(
-        sync_name="test___enter__", replace_symbols={"__aenter__": "__enter__"}
-    )
     async def test___aenter__(self):
         """Should return self"""
         async with self._make_one() as instance:
             assert await instance.__aenter__() == instance
 
     @CrossSync.pytest
-    @CrossSync.convert(
-        sync_name="test___exit__", replace_symbols={"__aexit__": "__exit__"}
-    )
     async def test___aexit__(self):
         """aexit should call close"""
         async with self._make_one() as instance:
@@ -1219,7 +1205,6 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e):
             ([4], [core_exceptions.DeadlineExceeded]),
         ],
     )
-    @CrossSync.convert(replace_symbols={"TableAsync": "Table"})
     async def test_customizable_retryable_errors(
         self, input_retryables, expected_retryables
     ):
diff --git a/tests/unit/data/_async/test_read_rows_acceptance.py b/tests/unit/data/_async/test_read_rows_acceptance.py
index 7cdd2c180..28d8c56f9 100644
--- a/tests/unit/data/_async/test_read_rows_acceptance.py
+++ b/tests/unit/data/_async/test_read_rows_acceptance.py
@@ -39,21 +39,12 @@
 )
 from google.cloud.bigtable.data._sync.client import BigtableDataClient  # noqa: F401
 
 
-@CrossSync.export_sync(
-    path="tests.unit.data._sync.test_read_rows_acceptance.TestReadRowsAcceptance",
-)
 class TestReadRowsAcceptanceAsync:
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"_ReadRowsOperationAsync": "_ReadRowsOperation"}
-    )
     def _get_operation_class():
         return _ReadRowsOperationAsync
 
     @staticmethod
-    @CrossSync.convert(
-        replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"}
-    )
     def _get_client_class():
         return BigtableDataClientAsync
 
@@ -83,11 +74,9 @@ def extract_results_from_row(row: Row):
         return results
 
     @staticmethod
-    @CrossSync.convert
     async def _coro_wrapper(stream):
         return stream
 
-    @CrossSync.convert
     async def _process_chunks(self, *chunks):
         async def _row_stream():
             yield ReadRowsResponse(chunks=chunks)
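For context on the machinery the removed markers fed: the convert decorator's job was to drive an async-to-sync rewrite of the decorated code. A minimal sketch of that rewrite technique using only the standard library ast module (an illustration of the general approach; CrossSync's actual generator, including the replace_symbols renaming shown in the hunks above, is more involved):

    import ast

    SOURCE = """
    class _MutateRowsOperationAsync:
        async def start(self):
            await self._run_attempt()
    """

    class AsyncToSync(ast.NodeTransformer):
        """Rewrite `async def` to `def` and drop `await` expressions."""

        def visit_AsyncFunctionDef(self, node):
            self.generic_visit(node)
            # Rebuild the coroutine as a plain FunctionDef with identical fields.
            sync_node = ast.FunctionDef(**{f: getattr(node, f) for f in node._fields})
            return ast.copy_location(sync_node, node)

        def visit_Await(self, node):
            self.generic_visit(node)
            return node.value  # keep the wrapped call, drop the `await`

    tree = AsyncToSync().visit(ast.parse(SOURCE))
    print(ast.unparse(ast.fix_missing_locations(tree)))
    # prints the class with a plain `def start(self)` calling `self._run_attempt()`

A replace_symbols map such as {"BigtableAsyncClient": "BigtableClient"} would be applied in the same pass by a visit_Name method that swaps matching identifiers, which is how the generated sync classes referenced by the deleted export_sync paths were presumably produced.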