Break: Argument name and versioning
ashvardanian committed Dec 28, 2023
1 parent 5e0c5ec commit 908f8c6
Showing 3 changed files with 15 additions and 7 deletions.
8 changes: 8 additions & 0 deletions .github/workflows/package.json
@@ -22,6 +22,10 @@
       "tag": "Add",
       "release": "minor"
     },
+    {
+      "tag": "Break",
+      "release": "major"
+    },
     {
       "tag": "Improve",
       "release": "patch"
@@ -46,6 +50,10 @@
       "tag": "Add",
       "release": "minor"
     },
+    {
+      "tag": "Break",
+      "release": "major"
+    },
     {
       "tag": "Improve",
       "release": "patch"
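For context only: the `tag`/`release` pairs above read as release rules for commit-message-driven versioning, and the new `Break` tag is what lets commits titled like this one trigger a major version bump. The sketch below is an assumption about how such a rule list might be wired up with semantic-release's `@semantic-release/commit-analyzer` and a custom header parser; only the three `tag`/`release` pairs come from the diff, while the surrounding plugin name, `parserOpts`, and pattern are illustrative.

```json
{
  "release": {
    "plugins": [
      [
        "@semantic-release/commit-analyzer",
        {
          "parserOpts": {
            "headerPattern": "^(\\w+): (.*)$",
            "headerCorrespondence": ["tag", "subject"]
          },
          "releaseRules": [
            { "tag": "Add", "release": "minor" },
            { "tag": "Break", "release": "major" },
            { "tag": "Improve", "release": "patch" }
          ]
        }
      ]
    ]
  }
}
```

Under a setup like this, a commit titled `Break: Argument name and versioning` would be analyzed as a breaking change and the next release would increment the major version.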
4 changes: 2 additions & 2 deletions README.md
@@ -141,8 +141,8 @@ decoded_text = processor.batch_decode(output[:, prompt_len:])[0]
 
 The generative models can be used for chat-like experiences, where the user can provide both text and images as input.
 To use that feature, you can start with the following CLI command:
 
 ```bash
-uform-chat --model unum-cloud/uform-gen-chat --image_path=zebra.jpg
-uform-chat --model unum-cloud/uform-gen-chat --image_path=zebra.jpg --device="cuda:0" --fp16
+uform-chat --model unum-cloud/uform-gen-chat --image=zebra.jpg
+uform-chat --model unum-cloud/uform-gen-chat --image=zebra.jpg --device="cuda:0" --fp16
 ```
10 changes: 5 additions & 5 deletions src/uform/chat.py
@@ -14,9 +14,9 @@ def parse_args():
     parser = ArgumentParser(description="Chat with UForm generative model")
 
     parser.add_argument("--model", type=str, default="unum-cloud/uform-gen-chat")
-    parser.add_argument("--image_path", type=str, help="", required=True)
+    parser.add_argument("--image", type=str, help="", required=True)
     parser.add_argument("--device", type=str, required=True)
-    parser.add_argument("--fp16", action="store_true")
+    parser.add_argument("--fp16", action="store_true")
 
     return parser.parse_args()

@@ -30,18 +30,18 @@ def run_chat(opts, model, processor):
 
     messages = [{"role": "system", "content": "You are a helpful assistant."}]
     is_first_message = True
-    if opts.image_path.startswith("http"):
+    if opts.image.startswith("http"):
         image = (
             processor.image_processor(
-                Image.open(requests.get(opts.image_path, stream=True).raw)
+                Image.open(requests.get(opts.image, stream=True).raw)
             )
             .unsqueeze(0)
             .to(torch.bfloat16 if opts.fp16 else torch.float32)
             .to(opts.device)
         )
     else:
         image = (
-            processor.image_processor(Image.open(opts.image_path))
+            processor.image_processor(Image.open(opts.image))
             .unsqueeze(0)
             .to(torch.bfloat16 if opts.fp16 else torch.float32)
             .to(opts.device)
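As a reading aid only, not part of the commit: here is the URL-versus-local-path branch from `run_chat` restated as a standalone helper built around the renamed `--image` argument. The helper name `load_image_tensor` is hypothetical and does not exist in the repository; the preprocessing calls mirror the diff above.

```python
# Hedged sketch mirroring src/uform/chat.py after the rename from --image_path to --image.
import requests
import torch
from PIL import Image


def load_image_tensor(processor, image: str, device: str, fp16: bool) -> torch.Tensor:
    # --image now accepts either an HTTP(S) URL or a local file path.
    if image.startswith("http"):
        pil_image = Image.open(requests.get(image, stream=True).raw)
    else:
        pil_image = Image.open(image)
    return (
        processor.image_processor(pil_image)  # same preprocessing call as in run_chat
        .unsqueeze(0)  # add a batch dimension
        .to(torch.bfloat16 if fp16 else torch.float32)  # the script maps --fp16 to bfloat16
        .to(device)
    )
```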
