Skip to content

Commit

Permalink
docs: created samples for load table and create table from schema file (
Browse files Browse the repository at this point in the history
#1436)

* docs: created samples for load table and create table from schema file

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

* Apply suggestions from code review

Co-authored-by: Tim Swast <swast@google.com>

* Update samples/snippets/create_table_schema_from_json.py

Co-authored-by: Tim Swast <swast@google.com>

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
Co-authored-by: Tim Swast <swast@google.com>
  • Loading branch information
3 people authored Dec 13, 2022
1 parent 89f8e9b commit 8ad2e5b
Show file tree
Hide file tree
Showing 11 changed files with 201 additions and 4 deletions.
42 changes: 42 additions & 0 deletions samples/snippets/create_table_schema_from_json.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pathlib


def create_table(table_id: str) -> None:
    """Create a BigQuery table whose schema is loaded from a JSON file.

    Args:
        table_id: Full table ID ("your-project.your_dataset.your_table")
            of the table to create.
    """
    # Save the caller-supplied table ID and resolve the checked-in schema
    # file; the published snippet below overwrites both with placeholder
    # TODO values, and they are restored after the [END] tag so the sample
    # remains runnable under test.
    orig_table_id = table_id
    current_directory = pathlib.Path(__file__).parent
    orig_schema_path = str(current_directory / "schema.json")
    # [START bigquery_schema_file_create]
    from google.cloud import bigquery

    client = bigquery.Client()

    # TODO(dev): Change table_id to the full name of the table you want to create.
    table_id = "your-project.your_dataset.your_table_name"
    # TODO(dev): Change schema_path variable to the path of your schema file.
    schema_path = "path/to/schema.json"
    # [END bigquery_schema_file_create]
    # Restore the real values so the API calls below use them.
    table_id = orig_table_id
    schema_path = orig_schema_path

    # [START bigquery_schema_file_create]
    # To load a schema file use the schema_from_json method.
    schema = client.schema_from_json(schema_path)

    table = bigquery.Table(table_id, schema=schema)
    table = client.create_table(table)  # API request
    print(f"Created table {table_id}.")
    # [END bigquery_schema_file_create]
32 changes: 32 additions & 0 deletions samples/snippets/create_table_schema_from_json_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing

import create_table_schema_from_json

if typing.TYPE_CHECKING:
import pytest


def test_create_table(
    capsys: "pytest.CaptureFixture[str]",
    random_table_id: str,
) -> None:
    """Run the create-table sample and check its stdout confirmation."""
    create_table_schema_from_json.create_table(random_table_id)

    captured = capsys.readouterr()
    assert "Created" in captured.out
    assert random_table_id in captured.out
2 changes: 1 addition & 1 deletion samples/snippets/dataset_access_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@
import update_dataset_access

if typing.TYPE_CHECKING:
import pytest
from google.cloud import bigquery
import pytest


def test_dataset_access_permissions(
Expand Down
2 changes: 1 addition & 1 deletion samples/snippets/delete_job.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ def delete_job_metadata(job_id: str, location: str) -> None:
orig_job_id = job_id
orig_location = location
# [START bigquery_delete_job]
from google.cloud import bigquery
from google.api_core import exceptions
from google.cloud import bigquery

# TODO(developer): Set the job ID to the ID of the job whose metadata you
# wish to delete.
Expand Down
60 changes: 60 additions & 0 deletions samples/snippets/load_table_schema_from_json.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pathlib


def load_table(table_id: str) -> None:
    """Load CSV data from Cloud Storage into a table using a JSON schema file.

    Args:
        table_id: Full table ID ("your-project.your_dataset.your_table")
            of the destination table.
    """
    # Save the real source URI, table ID, and checked-in schema path; the
    # published snippet below overwrites them with placeholder TODO values,
    # and they are restored after the [END] tag so the sample remains
    # runnable under test.
    orig_uri = "gs://cloud-samples-data/bigquery/us-states/us-states.csv"
    orig_table_id = table_id
    current_directory = pathlib.Path(__file__).parent
    orig_schema_path = str(current_directory / "schema_us_states.json")
    # [START bigquery_schema_file_load]
    from google.cloud import bigquery

    client = bigquery.Client()

    # TODO(dev): Change uri variable to the path of your data file.
    uri = "gs://your-bucket/path/to/your-file.csv"
    # TODO(dev): Change table_id to the full name of the table you want to create.
    table_id = "your-project.your_dataset.your_table"
    # TODO(dev): Change schema_path variable to the path of your schema file.
    schema_path = "path/to/schema.json"
    # [END bigquery_schema_file_load]
    # Restore the real values so the API calls below use them.
    uri = orig_uri
    table_id = orig_table_id
    schema_path = orig_schema_path
    # [START bigquery_schema_file_load]
    # To load a schema file use the schema_from_json method.
    schema = client.schema_from_json(schema_path)

    job_config = bigquery.LoadJobConfig(
        # To use the schema you loaded pass it into the
        # LoadJobConfig constructor.
        schema=schema,
        skip_leading_rows=1,
    )

    # Pass the job_config object to the load_table_from_file,
    # load_table_from_json, or load_table_from_uri method
    # to use the schema on a new table.
    load_job = client.load_table_from_uri(
        uri, table_id, job_config=job_config
    )  # Make an API request.

    load_job.result()  # Waits for the job to complete.

    destination_table = client.get_table(table_id)  # Make an API request.
    print(f"Loaded {destination_table.num_rows} rows to {table_id}.")
    # [END bigquery_schema_file_load]
32 changes: 32 additions & 0 deletions samples/snippets/load_table_schema_from_json_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing

import load_table_schema_from_json

if typing.TYPE_CHECKING:
import pytest


def test_load_table(
    capsys: "pytest.CaptureFixture[str]",
    random_table_id: str,
) -> None:
    """Run the load-table sample and check its stdout confirmation."""
    load_table_schema_from_json.load_table(random_table_id)

    captured = capsys.readouterr()
    assert "Loaded" in captured.out
    assert random_table_id in captured.out
1 change: 1 addition & 0 deletions samples/snippets/materialized_view.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ def update_materialized_view(

# [START bigquery_update_materialized_view]
import datetime

from google.cloud import bigquery

bigquery_client = bigquery.Client()
Expand Down
1 change: 0 additions & 1 deletion samples/snippets/quickstart_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@

import quickstart


# Must match the dataset listed in quickstart.py (there's no easy way to
# extract this).
DATASET_ID = "my_new_dataset"
Expand Down
20 changes: 20 additions & 0 deletions samples/snippets/schema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
[
{
"name": "qtr",
"type": "STRING",
"mode": "REQUIRED",
"description": "quarter"
},
{
"name": "rep",
"type": "STRING",
"mode": "NULLABLE",
"description": "sales representative"
},
{
"name": "sales",
"type": "FLOAT",
"mode": "NULLABLE",
"defaultValueExpression": "2.55"
}
]
12 changes: 12 additions & 0 deletions samples/snippets/schema_us_states.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
[
{
"name": "name",
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": "post_abbr",
"type": "STRING",
"mode": "NULLABLE"
}
]
1 change: 0 additions & 1 deletion samples/snippets/user_credentials_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@

from user_credentials import main


PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]

MockType = Union[mock.mock.MagicMock, mock.mock.AsyncMock]
Expand Down

0 comments on commit 8ad2e5b

Please sign in to comment.