Skip to content

Commit

Permalink
Merge pull request #1912 from tseaver/bigtable-v2-alias_imports_endpoints
Browse files Browse the repository at this point in the history

Alias Bigtable V1 imports / factories / entry point constants.
  • Loading branch information
tseaver authored Jun 27, 2016
2 parents 377f0da + 47b9246 commit cdbc47c
Show file tree
Hide file tree
Showing 13 changed files with 631 additions and 532 deletions.
76 changes: 45 additions & 31 deletions gcloud/bigtable/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,40 +31,52 @@

from grpc.beta import implementations

from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2
from gcloud.bigtable._generated import bigtable_cluster_service_pb2
# Cluster admin service is V1-only (V2 provides instance admin instead)
from gcloud.bigtable._generated import (
bigtable_cluster_service_messages_pb2 as messages_pb2)
from gcloud.bigtable._generated import bigtable_service_pb2
from gcloud.bigtable._generated import bigtable_table_service_pb2
from gcloud.bigtable._generated import operations_grpc_pb2
bigtable_cluster_data_pb2 as cluster_data_v1_pb2)
from gcloud.bigtable._generated import (
bigtable_cluster_service_pb2 as cluster_service_v1_pb2)
from gcloud.bigtable._generated import (
bigtable_cluster_service_messages_pb2 as cluster_messages_v1_pb2)
# V1 table admin service
from gcloud.bigtable._generated import (
bigtable_table_service_pb2 as table_service_v1_pb2)
# V1 data service
from gcloud.bigtable._generated import (
bigtable_service_pb2 as data_service_v1_pb2)

from gcloud.bigtable._generated import (
operations_grpc_pb2 as operations_grpc_v1_pb2)

from gcloud.bigtable.cluster import Cluster
from gcloud.client import _ClientFactoryMixin
from gcloud.client import _ClientProjectMixin
from gcloud.credentials import get_credentials


TABLE_STUB_FACTORY = (
bigtable_table_service_pb2.beta_create_BigtableTableService_stub)
TABLE_ADMIN_HOST = 'bigtabletableadmin.googleapis.com'
TABLE_STUB_FACTORY_V1 = (
table_service_v1_pb2.beta_create_BigtableTableService_stub)
TABLE_ADMIN_HOST_V1 = 'bigtabletableadmin.googleapis.com'
"""Table Admin API request host."""
TABLE_ADMIN_PORT = 443
TABLE_ADMIN_PORT_V1 = 443
"""Table Admin API request port."""

CLUSTER_STUB_FACTORY = (
bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub)
CLUSTER_ADMIN_HOST = 'bigtableclusteradmin.googleapis.com'
CLUSTER_STUB_FACTORY_V1 = (
cluster_service_v1_pb2.beta_create_BigtableClusterService_stub)
CLUSTER_ADMIN_HOST_V1 = 'bigtableclusteradmin.googleapis.com'
"""Cluster Admin API request host."""
CLUSTER_ADMIN_PORT = 443
CLUSTER_ADMIN_PORT_V1 = 443
"""Cluster Admin API request port."""

DATA_STUB_FACTORY = bigtable_service_pb2.beta_create_BigtableService_stub
DATA_API_HOST = 'bigtable.googleapis.com'
DATA_STUB_FACTORY_V1 = data_service_v1_pb2.beta_create_BigtableService_stub
DATA_API_HOST_V1 = 'bigtable.googleapis.com'
"""Data API request host."""
DATA_API_PORT = 443
DATA_API_PORT_V1 = 443
"""Data API request port."""

OPERATIONS_STUB_FACTORY = operations_grpc_pb2.beta_create_Operations_stub
OPERATIONS_STUB_FACTORY_V1 = operations_grpc_v1_pb2.beta_create_Operations_stub
OPERATIONS_API_HOST_V1 = CLUSTER_ADMIN_HOST_V1
OPERATIONS_API_PORT_V1 = CLUSTER_ADMIN_PORT_V1

ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
Expand Down Expand Up @@ -275,17 +287,17 @@ def _make_data_stub(self):
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, DATA_STUB_FACTORY,
DATA_API_HOST, DATA_API_PORT)
return _make_stub(self, DATA_STUB_FACTORY_V1,
DATA_API_HOST_V1, DATA_API_PORT_V1)

def _make_cluster_stub(self):
"""Creates gRPC stub to make requests to the Cluster Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, CLUSTER_STUB_FACTORY,
CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT)
return _make_stub(self, CLUSTER_STUB_FACTORY_V1,
CLUSTER_ADMIN_HOST_V1, CLUSTER_ADMIN_PORT_V1)

def _make_operations_stub(self):
"""Creates gRPC stub to make requests to the Operations API.
Expand All @@ -296,17 +308,17 @@ def _make_operations_stub(self):
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, OPERATIONS_STUB_FACTORY,
CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT)
return _make_stub(self, OPERATIONS_STUB_FACTORY_V1,
OPERATIONS_API_HOST_V1, OPERATIONS_API_PORT_V1)

def _make_table_stub(self):
"""Creates gRPC stub to make requests to the Table Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, TABLE_STUB_FACTORY,
TABLE_ADMIN_HOST, TABLE_ADMIN_PORT)
return _make_stub(self, TABLE_STUB_FACTORY_V1,
TABLE_ADMIN_HOST_V1, TABLE_ADMIN_PORT_V1)

def is_started(self):
"""Check if the client has been started.
Expand Down Expand Up @@ -401,14 +413,15 @@ def list_zones(self):
:raises: :class:`ValueError <exceptions.ValueError>` if one of the
zones is not in ``OK`` state.
"""
request_pb = messages_pb2.ListZonesRequest(name=self.project_name)
# We expect a `.messages_pb2.ListZonesResponse`
request_pb = cluster_messages_v1_pb2.ListZonesRequest(
name=self.project_name)
# We expect a `.cluster_messages_v1_pb2.ListZonesResponse`
list_zones_response = self._cluster_stub.ListZones(
request_pb, self.timeout_seconds)

result = []
for zone in list_zones_response.zones:
if zone.status != data_pb2.Zone.OK:
if zone.status != cluster_data_v1_pb2.Zone.OK:
raise ValueError('Zone %s not in OK state' % (
zone.display_name,))
result.append(zone.display_name)
Expand All @@ -422,8 +435,9 @@ def list_clusters(self):
returned and the second is a list of strings (the failed
zones in the request).
"""
request_pb = messages_pb2.ListClustersRequest(name=self.project_name)
# We expect a `.messages_pb2.ListClustersResponse`
request_pb = cluster_messages_v1_pb2.ListClustersRequest(
name=self.project_name)
# We expect a `.cluster_messages_v1_pb2.ListClustersResponse`
list_clusters_response = self._cluster_stub.ListClusters(
request_pb, self.timeout_seconds)

Expand Down
33 changes: 17 additions & 16 deletions gcloud/bigtable/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,12 @@
from google.longrunning import operations_pb2

from gcloud._helpers import _pb_timestamp_to_datetime
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_cluster_service_messages_pb2 as messages_pb2)
bigtable_cluster_data_pb2 as data_v1_pb2)
from gcloud.bigtable._generated import (
bigtable_table_service_messages_pb2 as table_messages_pb2)
bigtable_cluster_service_messages_pb2 as messages_v1_pb2)
from gcloud.bigtable._generated import (
bigtable_table_service_messages_pb2 as table_messages_v1_pb2)
from gcloud.bigtable.table import Table


Expand All @@ -40,9 +41,9 @@
_UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata'
_UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata'
_TYPE_URL_MAP = {
_CLUSTER_CREATE_METADATA: messages_pb2.CreateClusterMetadata,
_UPDATE_CREATE_METADATA: messages_pb2.UpdateClusterMetadata,
_UNDELETE_CREATE_METADATA: messages_pb2.UndeleteClusterMetadata,
_CLUSTER_CREATE_METADATA: messages_v1_pb2.CreateClusterMetadata,
_UPDATE_CREATE_METADATA: messages_v1_pb2.UpdateClusterMetadata,
_UNDELETE_CREATE_METADATA: messages_v1_pb2.UndeleteClusterMetadata,
}

DEFAULT_SERVE_NODES = 3
Expand All @@ -55,15 +56,15 @@ def _prepare_create_request(cluster):
:type cluster: :class:`Cluster`
:param cluster: The cluster to be created.
:rtype: :class:`.messages_pb2.CreateClusterRequest`
:rtype: :class:`.messages_v1_pb2.CreateClusterRequest`
:returns: The CreateCluster request object containing the cluster info.
"""
zone_full_name = ('projects/' + cluster._client.project +
'/zones/' + cluster.zone)
return messages_pb2.CreateClusterRequest(
return messages_v1_pb2.CreateClusterRequest(
name=zone_full_name,
cluster_id=cluster.cluster_id,
cluster=data_pb2.Cluster(
cluster=data_v1_pb2.Cluster(
display_name=cluster.display_name,
serve_nodes=cluster.serve_nodes,
),
Expand Down Expand Up @@ -198,7 +199,7 @@ class Cluster(object):
.. note::
For now, we leave out the ``default_storage_type`` (an enum)
which if not sent will end up as :data:`.data_pb2.STORAGE_SSD`.
which if not sent will end up as :data:`.data_v1_pb2.STORAGE_SSD`.
:type zone: str
:param zone: The name of the zone where the cluster resides.
Expand Down Expand Up @@ -332,7 +333,7 @@ def __ne__(self, other):

def reload(self):
"""Reload the metadata for this cluster."""
request_pb = messages_pb2.GetClusterRequest(name=self.name)
request_pb = messages_v1_pb2.GetClusterRequest(name=self.name)
# We expect a `._generated.bigtable_cluster_data_pb2.Cluster`.
cluster_pb = self._client._cluster_stub.GetCluster(
request_pb, self._client.timeout_seconds)
Expand Down Expand Up @@ -389,7 +390,7 @@ def update(self):
:returns: The long-running operation corresponding to the
update operation.
"""
request_pb = data_pb2.Cluster(
request_pb = data_v1_pb2.Cluster(
name=self.name,
display_name=self.display_name,
serve_nodes=self.serve_nodes,
Expand Down Expand Up @@ -426,7 +427,7 @@ def delete(self):
irrevocably disappear from the API, and their data will be
permanently deleted.
"""
request_pb = messages_pb2.DeleteClusterRequest(name=self.name)
request_pb = messages_v1_pb2.DeleteClusterRequest(name=self.name)
# We expect a `google.protobuf.empty_pb2.Empty`
self._client._cluster_stub.DeleteCluster(
request_pb, self._client.timeout_seconds)
Expand Down Expand Up @@ -456,7 +457,7 @@ def undelete(self):
:returns: The long-running operation corresponding to the
undelete operation.
"""
request_pb = messages_pb2.UndeleteClusterRequest(name=self.name)
request_pb = messages_v1_pb2.UndeleteClusterRequest(name=self.name)
# We expect a `google.longrunning.operations_pb2.Operation`.
operation_pb2 = self._client._cluster_stub.UndeleteCluster(
request_pb, self._client.timeout_seconds)
Expand All @@ -472,8 +473,8 @@ def list_tables(self):
:raises: :class:`ValueError <exceptions.ValueError>` if one of the
returned tables has a name that is not of the expected format.
"""
request_pb = table_messages_pb2.ListTablesRequest(name=self.name)
# We expect a `table_messages_pb2.ListTablesResponse`
request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name)
# We expect a `table_messages_v1_pb2.ListTablesResponse`
table_list_pb = self._client._table_stub.ListTables(
request_pb, self._client.timeout_seconds)

Expand Down
42 changes: 22 additions & 20 deletions gcloud/bigtable/column_family.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,10 @@
from google.protobuf import duration_pb2

from gcloud._helpers import _total_seconds
from gcloud.bigtable._generated import bigtable_table_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_pb2)
bigtable_table_data_pb2 as data_v1_pb2)
from gcloud.bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_v1_pb2)


def _timedelta_to_duration_pb(timedelta_val):
Expand Down Expand Up @@ -110,10 +111,10 @@ def __eq__(self, other):
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.data_pb2.GcRule`
:rtype: :class:`.data_v1_pb2.GcRule`
:returns: The converted current object.
"""
return data_pb2.GcRule(max_num_versions=self.max_num_versions)
return data_v1_pb2.GcRule(max_num_versions=self.max_num_versions)


class MaxAgeGCRule(GarbageCollectionRule):
Expand All @@ -134,11 +135,11 @@ def __eq__(self, other):
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.data_pb2.GcRule`
:rtype: :class:`.data_v1_pb2.GcRule`
:returns: The converted current object.
"""
max_age = _timedelta_to_duration_pb(self.max_age)
return data_pb2.GcRule(max_age=max_age)
return data_v1_pb2.GcRule(max_age=max_age)


class GCRuleUnion(GarbageCollectionRule):
Expand All @@ -159,12 +160,12 @@ def __eq__(self, other):
def to_pb(self):
"""Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.data_pb2.GcRule`
:rtype: :class:`.data_v1_pb2.GcRule`
:returns: The converted current object.
"""
union = data_pb2.GcRule.Union(
union = data_v1_pb2.GcRule.Union(
rules=[rule.to_pb() for rule in self.rules])
return data_pb2.GcRule(union=union)
return data_v1_pb2.GcRule(union=union)


class GCRuleIntersection(GarbageCollectionRule):
Expand All @@ -185,12 +186,12 @@ def __eq__(self, other):
def to_pb(self):
"""Converts the intersection into a single GC rule as a protobuf.
:rtype: :class:`.data_pb2.GcRule`
:rtype: :class:`.data_v1_pb2.GcRule`
:returns: The converted current object.
"""
intersection = data_pb2.GcRule.Intersection(
intersection = data_v1_pb2.GcRule.Intersection(
rules=[rule.to_pb() for rule in self.rules])
return data_pb2.GcRule(intersection=intersection)
return data_v1_pb2.GcRule(intersection=intersection)


class ColumnFamily(object):
Expand Down Expand Up @@ -250,16 +251,17 @@ def __ne__(self, other):
def create(self):
"""Create this column family."""
if self.gc_rule is None:
column_family = data_pb2.ColumnFamily()
column_family = data_v1_pb2.ColumnFamily()
else:
column_family = data_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb())
request_pb = messages_pb2.CreateColumnFamilyRequest(
column_family = data_v1_pb2.ColumnFamily(
gc_rule=self.gc_rule.to_pb())
request_pb = messages_v1_pb2.CreateColumnFamilyRequest(
name=self._table.name,
column_family_id=self.column_family_id,
column_family=column_family,
)
client = self._table._cluster._client
# We expect a `.data_pb2.ColumnFamily`. We ignore it since the only
# We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.CreateColumnFamily(request_pb,
Expand All @@ -276,17 +278,17 @@ def update(self):
request_kwargs = {'name': self.name}
if self.gc_rule is not None:
request_kwargs['gc_rule'] = self.gc_rule.to_pb()
request_pb = data_pb2.ColumnFamily(**request_kwargs)
request_pb = data_v1_pb2.ColumnFamily(**request_kwargs)
client = self._table._cluster._client
# We expect a `.data_pb2.ColumnFamily`. We ignore it since the only
# We expect a `.data_v1_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.UpdateColumnFamily(request_pb,
client.timeout_seconds)

def delete(self):
"""Delete this column family."""
request_pb = messages_pb2.DeleteColumnFamilyRequest(name=self.name)
request_pb = messages_v1_pb2.DeleteColumnFamilyRequest(name=self.name)
client = self._table._cluster._client
# We expect a `google.protobuf.empty_pb2.Empty`
client._table_stub.DeleteColumnFamily(request_pb,
Expand All @@ -296,7 +298,7 @@ def delete(self):
def _gc_rule_from_pb(gc_rule_pb):
"""Convert a protobuf GC rule to a native object.
:type gc_rule_pb: :class:`.data_pb2.GcRule`
:type gc_rule_pb: :class:`.data_v1_pb2.GcRule`
:param gc_rule_pb: The GC rule to convert.
:rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
Expand Down
Loading

0 comments on commit cdbc47c

Please sign in to comment.