
Commit

…ve-ai-python into caching

Change-Id: Id2b259fe4b2c91653bf5e4d5e883f556366d8676
mayureshagashe2105 committed May 30, 2024
2 parents bf6551a + f08c789 commit 82d3c5a
Showing 47 changed files with 1,501 additions and 1,426 deletions.
131 changes: 5 additions & 126 deletions docs/build_docs.py
@@ -44,77 +44,13 @@
# For showing the conditional imports and types in `content_types.py`
# grpc must be imported first.
typing.TYPE_CHECKING = True
from google import generativeai as palm

from google import generativeai as genai

from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api

import yaml

glm.__doc__ = """\
This package, `google.ai.generativelanguage`, is a low-level auto-generated client library for the PaLM API.
```posix-terminal
pip install google.ai.generativelanguage
```
It is built using the same tooling as Google Cloud client libraries, and will be quite familiar if you've used
those before.
While we encourage Python users to access the PaLM API using the `google.generativeai` package (aka `palm`),
this lower level package is also available.
Each method in the PaLM API is connected to one of the client classes. Pass your API key to the class's `client_options`
when initializing a client:
```
from google.ai import generativelanguage as glm
client = glm.DiscussServiceClient(
client_options={'api_key':'YOUR_API_KEY'})
```
To call the API, pass an appropriate request proto object. For `DiscussServiceClient.generate_message`, pass
a `generativelanguage.GenerateMessageRequest` instance:
```
request = glm.GenerateMessageRequest(
model='models/chat-bison-001',
prompt=glm.MessagePrompt(
messages=[glm.Message(content='Hello!')]))
client.generate_message(request)
```
```
candidates {
author: "1"
content: "Hello! How can I help you today?"
}
...
```
For simplicity:
* The API methods also accept keyword arguments.
* Anywhere you might pass a proto-object, the library will also accept simple python structures.
So the following is equivalent to the previous example:
```
client.generate_message(
model='models/chat-bison-001',
prompt={'messages':[{'content':'Hello!'}]})
```
```
candidates {
author: "1"
content: "Hello! How can I help you today?"
}
...
```
"""

HERE = pathlib.Path(__file__).parent

PROJECT_SHORT_NAME = "genai"
@@ -139,43 +75,6 @@
)


class MyFilter:
def __init__(self, base_dirs):
self.filter_base_dirs = public_api.FilterBaseDirs(base_dirs)

def drop_staticmethods(self, parent, children):
parent = dict(parent.__dict__)
for name, value in children:
if not isinstance(parent.get(name, None), staticmethod):
yield name, value

def __call__(self, path, parent, children):
if any("generativelanguage" in part for part in path) or "generativeai" in path:
children = self.filter_base_dirs(path, parent, children)
children = public_api.explicit_package_contents_filter(path, parent, children)

if any("generativelanguage" in part for part in path):
if "ServiceClient" in path[-1] or "ServiceAsyncClient" in path[-1]:
children = list(self.drop_staticmethods(parent, children))

return children


class MyDocGenerator(generate_lib.DocGenerator):
def make_default_filters(self):
return [
# filter the api.
public_api.FailIfNestedTooDeep(10),
public_api.filter_module_all,
public_api.add_proto_fields,
public_api.filter_private_symbols,
MyFilter(self._base_dir), # Replaces: public_api.FilterBaseDirs(self._base_dir),
public_api.FilterPrivateMap(self._private_map),
public_api.filter_doc_controls_skip,
public_api.ignore_typing,
]


def gen_api_docs():
"""Generates api docs for the generative-ai package."""
for name in dir(google):
@@ -188,11 +87,11 @@ def gen_api_docs():
"""
)

doc_generator = MyDocGenerator(
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
py_modules=[("google", google)],
py_modules=[("google.generativeai", genai)],
base_dir=(
pathlib.Path(palm.__file__).parent,
pathlib.Path(genai.__file__).parent,
pathlib.Path(glm.__file__).parent.parent,
),
code_url_prefix=(
@@ -201,32 +100,12 @@ def gen_api_docs():
),
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[],
callbacks=[public_api.explicit_package_contents_filter],
)

out_path = pathlib.Path(_OUTPUT_DIR.value)
doc_generator.build(out_path)

# Fixup the toc file.
toc_path = out_path / "google/_toc.yaml"
toc = yaml.safe_load(toc_path.read_text())
assert toc["toc"][0]["title"] == "google"
toc["toc"] = toc["toc"][1:]
toc["toc"][0]["title"] = "google.ai.generativelanguage"
toc["toc"][0]["section"] = toc["toc"][0]["section"][1]["section"]
toc["toc"][0], toc["toc"][1] = toc["toc"][1], toc["toc"][0]
toc_path.write_text(yaml.dump(toc))

# remove some dummy files and redirect them to `api/`
(out_path / "google.md").unlink()
(out_path / "google/ai.md").unlink()
redirects_path = out_path / "_redirects.yaml"
redirects = {"redirects": []}
redirects["redirects"].insert(0, {"from": "/api/python/google/ai", "to": "/api/"})
redirects["redirects"].insert(0, {"from": "/api/python/google", "to": "/api/"})
redirects["redirects"].insert(0, {"from": "/api/python", "to": "/api/"})
redirects_path.write_text(yaml.dump(redirects))

# clear `oneof` junk from proto pages
for fpath in out_path.rglob("*.md"):
old_content = fpath.read_text()
5 changes: 3 additions & 2 deletions google/generativeai/__init__.py
@@ -30,8 +30,8 @@
genai.configure(api_key=os.environ['API_KEY'])
model = genai.GenerativeModel(name='gemini-pro')
response = model.generate_content('Please summarise this document: ...')
model = genai.GenerativeModel(name='gemini-1.5-flash')
response = model.generate_content('Teach me about how an LLM works')
print(response.text)
```
@@ -42,6 +42,7 @@

from google.generativeai import version

from google.generativeai import protos
from google.generativeai import types
from google.generativeai.types import GenerationConfig

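The thread running through this merge is the newly re-exported `protos` module: raw types previously spelled `google.ai.generativelanguage as glm` are now reachable as `google.generativeai.protos`. A minimal sketch of the new spelling (the passage content is a made-up example):

```
from google.generativeai import protos

# Proto types formerly imported from `google.ai.generativelanguage`
# are now addressed through the public protos alias.
passage = protos.GroundingPassage(
    id="p1",
    content=protos.Content(parts=[protos.Part(text="The sky is blue.")]),
)
```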
59 changes: 31 additions & 28 deletions google/generativeai/answer.py
@@ -21,6 +21,7 @@
from typing_extensions import TypedDict

import google.ai.generativelanguage as glm
from google.generativeai import protos

from google.generativeai.client import (
get_default_generative_client,
@@ -35,7 +36,7 @@

DEFAULT_ANSWER_MODEL = "models/aqa"

AnswerStyle = glm.GenerateAnswerRequest.AnswerStyle
AnswerStyle = protos.GenerateAnswerRequest.AnswerStyle

AnswerStyleOptions = Union[int, str, AnswerStyle]
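`AnswerStyleOptions` keeps the argument permissive: `to_answer_style` below normalizes an int, a string, or the enum member itself. A small runnable sketch, assuming the usual proto-enum names and values:

```
from google.generativeai import protos
from google.generativeai.answer import to_answer_style

AnswerStyle = protos.GenerateAnswerRequest.AnswerStyle

# All three forms are intended to resolve to the same enum member.
assert to_answer_style(AnswerStyle.ABSTRACTIVE) == AnswerStyle.ABSTRACTIVE
assert to_answer_style("ABSTRACTIVE") == AnswerStyle.ABSTRACTIVE
assert to_answer_style(1) == AnswerStyle.ABSTRACTIVE  # assuming ABSTRACTIVE == 1
```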

@@ -66,28 +67,30 @@ def to_answer_style(x: AnswerStyleOptions) -> AnswerStyle:


GroundingPassageOptions = (
Union[glm.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType],
Union[
protos.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType
],
)

GroundingPassagesOptions = Union[
glm.GroundingPassages,
protos.GroundingPassages,
Iterable[GroundingPassageOptions],
Mapping[str, content_types.ContentType],
]


def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingPassages:
def _make_grounding_passages(source: GroundingPassagesOptions) -> protos.GroundingPassages:
"""
Converts the `source` into a `glm.GroundingPassages`. A `GroundingPassages` contains a list of
`glm.GroundingPassage` objects, each of which contains a `glm.Content` and a string `id`.
Converts the `source` into a `protos.GroundingPassages`. A `GroundingPassages` contains a list of
`protos.GroundingPassage` objects, each of which contains a `protos.Content` and a string `id`.
Args:
source: `Content` or a `GroundingPassagesOptions` that will be converted to glm.GroundingPassages.
source: `Content` or a `GroundingPassagesOptions` that will be converted to protos.GroundingPassages.
Returns:
`glm.GroundingPassages` to be passed into `glm.GenerateAnswer`.
`protos.GroundingPassages` to be passed into `protos.GenerateAnswer`.
"""
if isinstance(source, glm.GroundingPassages):
if isinstance(source, protos.GroundingPassages):
return source

if not isinstance(source, Iterable):
@@ -100,19 +103,19 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingPassages:
source = source.items()

for n, data in enumerate(source):
if isinstance(data, glm.GroundingPassage):
if isinstance(data, protos.GroundingPassage):
passages.append(data)
elif isinstance(data, tuple):
id, content = data # tuple must have exactly 2 items.
passages.append({"id": id, "content": content_types.to_content(content)})
else:
passages.append({"id": str(n), "content": content_types.to_content(data)})

return glm.GroundingPassages(passages=passages)
return protos.GroundingPassages(passages=passages)
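Per the branches above, each of these call forms builds a valid `protos.GroundingPassages`. A runnable sketch with made-up passage text:

```
from google.generativeai.answer import _make_grounding_passages

# Mapping form: keys become the passage ids.
_make_grounding_passages({"a": "The sky is blue.", "b": "Grass is green."})

# Iterable of (id, content) tuples.
_make_grounding_passages([("a", "The sky is blue."), ("b", "Grass is green.")])

# Bare contents: ids are auto-assigned as "0", "1", ...
_make_grounding_passages(["The sky is blue.", "Grass is green."])
```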


SourceNameType = Union[
str, retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document
str, retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document
]


@@ -127,15 +130,15 @@ class SemanticRetrieverConfigDict(TypedDict):
SemanticRetrieverConfigOptions = Union[
SourceNameType,
SemanticRetrieverConfigDict,
glm.SemanticRetrieverConfig,
protos.SemanticRetrieverConfig,
]


def _maybe_get_source_name(source) -> str | None:
if isinstance(source, str):
return source
elif isinstance(
source, (retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document)
source, (retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document)
):
return source.name
else:
@@ -145,8 +148,8 @@ def _maybe_get_source_name(source) -> str | None:
def _make_semantic_retriever_config(
source: SemanticRetrieverConfigOptions,
query: content_types.ContentsType,
) -> glm.SemanticRetrieverConfig:
if isinstance(source, glm.SemanticRetrieverConfig):
) -> protos.SemanticRetrieverConfig:
if isinstance(source, protos.SemanticRetrieverConfig):
return source

name = _maybe_get_source_name(source)
@@ -156,7 +159,7 @@ def _make_semantic_retriever_config(
source["source"] = _maybe_get_source_name(source["source"])
else:
raise TypeError(
f"Invalid input: Failed to create a 'glm.SemanticRetrieverConfig' from the provided source. "
f"Invalid input: Failed to create a 'protos.SemanticRetrieverConfig' from the provided source. "
f"Received type: {type(source).__name__}, "
f"Received value: {source}"
)
@@ -166,7 +169,7 @@ def _make_semantic_retriever_config(
elif isinstance(source["query"], str):
source["query"] = content_types.to_content(source["query"])

return glm.SemanticRetrieverConfig(source)
return protos.SemanticRetrieverConfig(source)
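Read together with `_maybe_get_source_name`, the accepted inputs look like this (a sketch with a hypothetical corpus name):

```
from google.generativeai.answer import _make_semantic_retriever_config
from google.generativeai.types import content_types

question = content_types.to_content("What color is the sky?")

# A bare resource name is expanded to {"source": name, "query": query}.
_make_semantic_retriever_config("corpora/my-corpus", question)

# Dict form: `source` may itself be a Corpus/Document or its resource name,
# and a string `query` is converted with content_types.to_content.
_make_semantic_retriever_config(
    {"source": "corpora/my-corpus", "query": "What color is the sky?"},
    question,
)
```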


def _make_generate_answer_request(
@@ -178,26 +181,26 @@ def _make_generate_answer_request(
answer_style: AnswerStyle | None = None,
safety_settings: safety_types.SafetySettingOptions | None = None,
temperature: float | None = None,
) -> glm.GenerateAnswerRequest:
) -> protos.GenerateAnswerRequest:
"""
Constructs a glm.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
Constructs a protos.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
Args:
model: Name of the model used to generate the grounded response.
contents: Content of the current conversation with the model. For single-turn query, this is a
single question to answer. For multi-turn queries, this is a repeated field that contains
conversation history and the last `Content` in the list containing the question.
inline_passages: Grounding passages (a list of `Content`-like objects or `(id, content)` pairs,
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
one must be set, but not both.
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
`inline_passages`, one must be set, but not both.
answer_style: Style for grounded answers.
safety_settings: Safety settings for generated output.
temperature: The temperature for randomness in the output.
Returns:
Call for glm.GenerateAnswerRequest().
Call for protos.GenerateAnswerRequest().
"""
model = model_types.make_model_name(model)

@@ -224,7 +227,7 @@ def _make_generate_answer_request(
if answer_style:
answer_style = to_answer_style(answer_style)

return glm.GenerateAnswerRequest(
return protos.GenerateAnswerRequest(
model=model,
contents=contents,
inline_passages=inline_passages,
@@ -273,9 +276,9 @@ def generate_answer(
contents: The question to be answered by the model, grounded in the
provided source.
inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
one must be set, but not both.
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
`inline_passages`, one must be set, but not both.
answer_style: Style in which the grounded answer should be returned.
safety_settings: Safety settings for generated output. Defaults to None.
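For orientation, a hedged end-to-end sketch of the public call this plumbing serves; the model is assumed to default to `DEFAULT_ANSWER_MODEL` (`models/aqa`), and the question and passages are invented:

```
import google.generativeai as genai
from google.generativeai.answer import generate_answer

genai.configure(api_key="YOUR_API_KEY")  # key-based auth, as in the package docs

response = generate_answer(
    contents="What color is the sky?",
    inline_passages=["The sky is blue during the day.", "At night it appears black."],
    answer_style="ABSTRACTIVE",
)
print(response.answer)
```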
@@ -327,9 +330,9 @@ async def generate_answer_async(
contents: The question to be answered by the model, grounded in the
provided source.
inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retriever`,
one must be set, but not both.
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
`inline_passages`, one must be set, but not both.
answer_style: Style in which the grounded answer should be returned.
safety_settings: Safety settings for generated output. Defaults to None.
3 changes: 2 additions & 1 deletion google/generativeai/client.py
@@ -10,6 +10,7 @@
import httplib2

import google.ai.generativelanguage as glm
import google.generativeai.protos as protos

from google.auth import credentials as ga_credentials
from google.auth import exceptions as ga_exceptions
@@ -76,7 +77,7 @@ def create_file(
name: str | None = None,
display_name: str | None = None,
resumable: bool = True,
) -> glm.File:
) -> protos.File:
if self._discovery_api is None:
self._setup_discovery_api()

