2 changes: 1 addition & 1 deletion examples/geo3k_vlm_multi_turn/README.md

````diff
@@ -16,7 +16,7 @@ In terms of the environment interaction, this example initializes a custom inter
 
 The reward model is the default math RM.
 
-![VLM multi-turn geo3k reward](vlm_multi_turn_geo3k_reward.png)
+![VLM multi-turn geo3k reward](geo3k_vlm_multi_turn_reward.png)
 
 ## Reproduce
 ```bash
````
16 changes: 9 additions & 7 deletions examples/geo3k_vlm_multi_turn/rollout.py

```diff
@@ -17,6 +17,12 @@
 
 DEFAULT_ENV_MODULE = "examples.vlm_multi_turn.env_geo3k"
 
+# Dummy messages used for calculating trim length in chat template encoding
+DUMMY_MESSAGES = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "I am a user."},
+]
+
 
 def _load_env_module(env_path: str | None):
     """Load the interaction environment module from a module path or a file path."""
@@ -61,21 +67,17 @@ def _encode_observation_for_generation(
     apply_kwargs = apply_chat_template_kwargs or {}
 
     trim_length = 0
-    dummy_messages = [
-        {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": "I am a user."},
-    ]
 
     if apply_chat_template:
         dummy_prompt = tokenizer.apply_chat_template(
-            dummy_messages,
+            DUMMY_MESSAGES,
             tools=tools,
             tokenize=False,
             add_generation_prompt=False,
             **apply_kwargs,
         )
         formatted_prompt = tokenizer.apply_chat_template(
-            dummy_messages + [message],
+            DUMMY_MESSAGES + [message],
             tools=tools,
             tokenize=False,
             add_generation_prompt=True,
```
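The hunk above hoists the prefix-trimming trick behind the new `DUMMY_MESSAGES` constant: the chat template is applied once to the dummy conversation alone and once to the dummy conversation plus the new message, and the shared dummy prefix is trimmed off so only the new message's template chunk (plus the generation prompt) remains. A minimal sketch of the idea, assuming a standard Hugging Face tokenizer; the model name and the `render_observation_chunk` helper are illustrative, not part of this PR:

```python
# Minimal sketch of the prefix-trimming idea, assuming a Hugging Face
# tokenizer with a chat template. The model name and helper name are
# illustrative; the PR's real code lives in _encode_observation_for_generation.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

DUMMY_MESSAGES = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "I am a user."},
]


def render_observation_chunk(message: dict) -> str:
    """Render only `message` plus the generation prompt, without the
    preamble the chat template emits for a whole conversation."""
    dummy_prompt = tokenizer.apply_chat_template(
        DUMMY_MESSAGES, tokenize=False, add_generation_prompt=False
    )
    formatted_prompt = tokenizer.apply_chat_template(
        DUMMY_MESSAGES + [message], tokenize=False, add_generation_prompt=True
    )
    # Both renderings share the dummy conversation as a prefix, so trimming
    # its length leaves just the new message's template chunk.
    trim_length = len(dummy_prompt)
    return formatted_prompt[trim_length:]


print(render_observation_chunk({"role": "user", "content": "Observation: 42"}))
```

Hoisting the dummy conversation to a module-level constant avoids rebuilding the list on every call and guarantees both `apply_chat_template` calls see the same prefix.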
```diff
@@ -343,7 +345,7 @@ async def generate(args: Any, sample: Sample, sampling_params) -> Sample:
             sample.status = Sample.Status.COMPLETED
             break
 
-        obs_log_probs = [float("-inf")] * len(obs_prompt_ids)
+        obs_log_probs = [0.0] * len(obs_prompt_ids)
         _append_to_sample(sample, response_tokens, obs_prompt_ids, obs_log_probs, loss_mask_val=0)
         budget = _update_budget(budget, len(obs_prompt_ids))
 
```

Binary file not shown.
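The final hunk changes the placeholder log-prob recorded for observation tokens from `float("-inf")` to `0.0`. These tokens are appended with `loss_mask_val=0`, so they should never affect the loss, but a `-inf` placeholder can still poison masked arithmetic downstream, since `inf * 0` is NaN in IEEE-754 floating point. A minimal illustration of that failure mode in plain Python; the PPO-style importance ratio is an assumed example, not this repo's actual loss code:

```python
import math

# A PPO-style importance ratio at a masked (observation) position.
new_logprob = -0.3  # log-prob recomputed by the trainer at this position
mask = 0.0          # loss mask: observation tokens are excluded

# With a -inf placeholder, masking cannot save the computation:
old_logprob = float("-inf")
ratio = math.exp(new_logprob - old_logprob)  # exp(+inf) -> inf
print(ratio * mask)                          # nan: inf * 0 is NaN

# With a 0.0 placeholder, the same pipeline stays finite:
old_logprob = 0.0
ratio = math.exp(new_logprob - old_logprob)  # a finite number
print(ratio * mask)                          # 0.0, as intended
```

With `0.0` the masked positions stay finite, and the loss mask alone decides their contribution.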