Commit 4fec982

fix: adjust naming and tests and rebase typo

1 parent ef2363c

File tree

7 files changed (+139, -135 lines)

clients/python/text_generation/types.py

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ class ChatComplete(BaseModel):
     usage: Any
 
 
-class CompletionComplete(BaseModel):
+class Completion(BaseModel):
     # Completion details
     id: str
     object: str
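
The visible change here is only to the class name, so downstream code that imported the old name would be updated the same way the tests below are. A minimal illustrative sketch (assuming the text_generation client package from this repo):

    # after this commit
    from text_generation.types import Completion  # previously CompletionComplete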

integration-tests/conftest.py

Lines changed: 10 additions & 5 deletions
@@ -27,7 +27,7 @@
     ChatComplete,
     ChatCompletionChunk,
     ChatCompletionComplete,
-    CompletionComplete,
+    Completion,
 )
 
 DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
@@ -71,17 +71,22 @@ def convert_data(data):
             data = json.loads(data)
             if isinstance(data, Dict) and "choices" in data:
                 choices = data["choices"]
-                print(choices)
                 if isinstance(choices, List) and len(choices) >= 1:
                     if "delta" in choices[0]:
                         return ChatCompletionChunk(**data)
                     if "text" in choices[0]:
-                        return CompletionComplete(**data)
+                        return Completion(**data)
                 return ChatComplete(**data)
 
             if isinstance(data, Dict):
                 return Response(**data)
             if isinstance(data, List):
+                if (
+                    len(data) > 0
+                    and "object" in data[0]
+                    and data[0]["object"] == "text_completion"
+                ):
+                    return [Completion(**d) for d in data]
                 return [Response(**d) for d in data]
             raise NotImplementedError
 
@@ -163,7 +168,7 @@ def eq_details(details: Details, other: Details) -> bool:
                 )
             )
 
-        def eq_completion(response: ChatComplete, other: ChatComplete) -> bool:
+        def eq_completion(response: Completion, other: Completion) -> bool:
             return response.choices[0].text == other.choices[0].text
 
         def eq_chat_complete(response: ChatComplete, other: ChatComplete) -> bool:
@@ -189,7 +194,7 @@ def eq_response(response: Response, other: Response) -> bool:
         if not isinstance(snapshot_data, List):
             snapshot_data = [snapshot_data]
 
-        if isinstance(serialized_data[0], CompletionComplete):
+        if isinstance(serialized_data[0], Completion):
             return len(snapshot_data) == len(serialized_data) and all(
                 [eq_completion(r, o) for r, o in zip(serialized_data, snapshot_data)]
             )
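
The new list branch in convert_data routes a JSON array of text-completion payloads to the renamed Completion model rather than the default Response model. A standalone sketch of that dispatch rule, outside the test fixture and purely for illustration (it assumes the text_generation client types shown above):

    import json
    from typing import List

    from text_generation.types import Completion, Response


    def dispatch(raw: str):
        # Mirror of the branch added above: a JSON array whose first element
        # is marked "text_completion" parses into Completion models; any other
        # array falls back to the generate-style Response model.
        data = json.loads(raw)
        if isinstance(data, List):
            if (
                len(data) > 0
                and "object" in data[0]
                and data[0]["object"] == "text_completion"
            ):
                return [Completion(**d) for d in data]
            return [Response(**d) for d in data]
        raise NotImplementedError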

integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json

Lines changed: 8 additions & 8 deletions
@@ -6,12 +6,6 @@
       "logprobs": null,
       "text": " PR for more information?"
     },
-    {
-      "finish_reason": "length",
-      "index": 3,
-      "logprobs": null,
-      "text": "hd20220811-"
-    },
     {
       "finish_reason": "length",
       "index": 0,
@@ -23,13 +17,19 @@
       "index": 2,
       "logprobs": null,
       "text": " severely flawed and often has a substandard"
+    },
+    {
+      "finish_reason": "length",
+      "index": 3,
+      "logprobs": null,
+      "text": "hd20220811-"
     }
   ],
-  "created": 1712875413,
+  "created": 1713284455,
   "id": "",
   "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
   "object": "text_completion",
-  "system_fingerprint": "1.4.5-native",
+  "system_fingerprint": "2.0.0-native",
   "usage": {
     "completion_tokens": 36,
     "prompt_tokens": 8,
