# demo_bedrock_langchain.py
import json

from langchain.llms import Bedrock
from langchain.chat_models import BedrockChat
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser

def run_demo(session):
    bedrock = session.client('bedrock')
    bedrock_runtime = session.client('bedrock-runtime', region_name="us-east-1")

    # Pick one model_id; the last uncommented assignment is the one used.
    #model_id = "anthropic.claude-instant-v1"
    #model_id = "anthropic.claude-v1"
    #model_id = "meta.llama2-13b-chat-v1"  # NotImplementedError: Provider meta model does not support chat.
    model_id = "cohere.command-text-v14"

    model_kwargs = {"temperature": 0.0}
    model_kwargs_meta = {"temperature": 0.0, "top_p": 0.9, "max_gen_len": 512}  # parameters for Meta Llama 2 models

    prompt = "Tell me a fun fact about Pluto."
    prompts = ["Tell me a fun fact about Pluto.", "Tell me a fun fact about Venus."]

    #demo_invoke_model_meta(bedrock_runtime, model_id, prompt)  # expects a Meta Llama model_id
    demo_llm(bedrock_runtime, model_id, model_kwargs, prompt)
    #demo_llm_predict(bedrock_runtime, model_id, model_kwargs, prompt)
    #demo_llm_generate(bedrock_runtime, model_id, model_kwargs, prompts)
    #demo_llm_chain(bedrock_runtime, model_id, model_kwargs, prompt)

def demo_invoke_model_meta(bedrock_runtime, model_id, prompt):
    """Call a Meta Llama 2 model directly through the Bedrock runtime API."""
    print(f"Call demo_invoke_model_meta model_id={model_id} prompt={prompt}")

    # Request body format expected by Meta Llama 2 models on Bedrock.
    request = {
        "prompt": prompt,
        "temperature": 0.2,
        "top_p": 0.9,
        "max_gen_len": 512,
    }

    response = bedrock_runtime.invoke_model(
        modelId=model_id,
        accept="application/json",
        contentType="application/json",
        body=json.dumps(request),
    )
    response_body_json = json.loads(response["body"].read())

    print("*************************************************")
    print(response_body_json)
    completion = response_body_json["generation"]
    print("*************************************************")
    print(f"Answer: {completion}")
    print("*************************************************")

def demo_llm(bedrock_runtime, model_id, model_kwargs, prompt):
    """Invoke a Bedrock text model through the LangChain Bedrock LLM wrapper."""
    print(f"Call demo_llm model_id={model_id} model_kwargs={model_kwargs} prompt={prompt}")

    llm = Bedrock(
        client=bedrock_runtime,
        model_id=model_id,
        model_kwargs=model_kwargs,
    )

    output = llm(prompt)

    print(output)

def demo_llm_predict(bedrock_runtime, model_id, model_kwargs, prompt):
    print("Call demo_llm_predict")

    llm = Bedrock(
        client=bedrock_runtime,
        model_id=model_id,
        model_kwargs=model_kwargs,
    )

    output = llm.predict(prompt)

    print(output)

def demo_llm_generate(bedrock_runtime, model_id, model_kwargs, prompts):
    print("Call demo_llm_generate")

    llm = Bedrock(
        client=bedrock_runtime,
        model_id=model_id,
        model_kwargs=model_kwargs,
    )

    # generate() accepts a list of prompts and returns an LLMResult.
    output = llm.generate(prompts)

    #print(output.schema())
    print(output.llm_output)
    # output.generations holds one list of Generation objects per prompt.
    for generation in output.generations:
        print(generation[0].text)
        print("")

def demo_llm_chain(bedrock_runtime, model_id, model_kwargs, prompt):
    print("Call demo_llm_chain")

    # The prompt is used verbatim, so the template takes no input variables.
    prompt_template = PromptTemplate(input_variables=[], template=prompt)

    llm = BedrockChat(
        client=bedrock_runtime,
        model_id=model_id,
        model_kwargs=model_kwargs,
    )

    # LangChain Expression Language (LCEL) pipeline: prompt -> chat model -> string output.
    chain = prompt_template | llm | StrOutputParser()

    output = chain.invoke({})

    print(output)
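
# Not part of the original file: a minimal usage sketch, assuming the demo is run
# with a default boto3 session whose credentials have Bedrock model access in us-east-1.
if __name__ == "__main__":
    import boto3

    run_demo(boto3.Session())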