Commit 2d89d69

fix model_specific_prompt_kwargs
Espere-1119-Song committed Oct 30, 2024
1 parent fc6973c commit 2d89d69
Showing 5 changed files with 15 additions and 19 deletions.
lmms_eval/tasks/cinepile/utils.py (4 changes: 2 additions & 2 deletions)
@@ -58,9 +58,9 @@ def format_question_and_options(question, options):
     return formatted_string


-def cinepile_doc_to_text(doc, model_specific_prompt_kwargs=None):
+def cinepile_doc_to_text(doc, lmms_eval_specific_kwargs=None):
     formatted_question = format_question_and_options(doc["question"], doc["choices"])
-    model_input = f"{model_specific_prompt_kwargs['pre_prompt']}\n\n**Subtitles:**\n{doc['subtitles']}\n\n{formatted_question}\n{model_specific_prompt_kwargs['post_prompt']}"
+    model_input = f"{lmms_eval_specific_kwargs['pre_prompt']}\n\n**Subtitles:**\n{doc['subtitles']}\n\n{formatted_question}\n{lmms_eval_specific_kwargs['post_prompt']}"
     return model_input

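For context, a minimal sketch of how the renamed keyword argument is exercised; the doc fields and prompt strings below are made-up illustrations, since the real values come from the cinepile task YAML and are injected by lmms-eval's task loader rather than passed by hand.

from lmms_eval.tasks.cinepile.utils import cinepile_doc_to_text

# Illustrative document and kwargs -- not the actual cinepile config values.
doc = {
    "question": "What does the detective find in the apartment?",
    "choices": ["A cassette tape", "A photograph", "A key", "A letter", "Nothing"],
    "subtitles": "[00:01] You should not be here.",
}
lmms_eval_specific_kwargs = {
    "pre_prompt": "Answer the question about the movie clip using the subtitles.",
    "post_prompt": "Respond with the letter of the correct option.",
}
prompt = cinepile_doc_to_text(doc, lmms_eval_specific_kwargs=lmms_eval_specific_kwargs)
print(prompt)
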
lmms_eval/tasks/moviechat/_default_template_yaml (2 changes: 1 addition & 1 deletion)
@@ -3,7 +3,7 @@ dataset_kwargs:
   token: True
   video: True
   cache_dir: moviechat_1k_test
-model_specific_prompt_kwargs:
+lmms_eval_specific_kwargs:
   default:
     pre_prompt: "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail."
     post_prompt: ""
lmms_eval/tasks/moviechat/utils.py (14 changes: 7 additions & 7 deletions)
@@ -80,15 +80,15 @@ def moviechat_doc_to_visual_breakpoint(doc):


 # format the question
-def moviechat_doc_to_text(doc, model_specific_prompt_kwargs=None):
-    if model_specific_prompt_kwargs is None:
-        model_specific_prompt_kwargs = {}
+def moviechat_doc_to_text(doc, lmms_eval_specific_kwargs=None):
+    if lmms_eval_specific_kwargs is None:
+        lmms_eval_specific_kwargs = {}
     pre_prompt = ""
     post_prompt = ""
-    if "pre_prompt" in model_specific_prompt_kwargs:
-        pre_prompt = model_specific_prompt_kwargs["pre_prompt"]
-    if "post_prompt" in model_specific_prompt_kwargs:
-        post_prompt = model_specific_prompt_kwargs["post_prompt"]
+    if "pre_prompt" in lmms_eval_specific_kwargs:
+        pre_prompt = lmms_eval_specific_kwargs["pre_prompt"]
+    if "post_prompt" in lmms_eval_specific_kwargs:
+        post_prompt = lmms_eval_specific_kwargs["post_prompt"]

     question = doc["question"]
     return f"{pre_prompt}{question}{post_prompt}"
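To show how the rename ties the YAML and the helper together, here is a rough, hand-written sketch; in practice lmms-eval's task machinery builds the dict from the lmms_eval_specific_kwargs block of _default_template_yaml above, and the doc below is invented for illustration.

from lmms_eval.tasks.moviechat.utils import moviechat_doc_to_text

# Hand-built stand-in for the "default" block of lmms_eval_specific_kwargs
# in moviechat/_default_template_yaml (normally supplied by the task loader).
lmms_eval_specific_kwargs = {
    "pre_prompt": "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.",
    "post_prompt": "",
}
doc = {"question": "What is the main character doing at the start of the video?"}

print(moviechat_doc_to_text(doc, lmms_eval_specific_kwargs=lmms_eval_specific_kwargs))
# With no kwargs at all, the function now falls back to empty pre- and post-prompts:
print(moviechat_doc_to_text(doc))
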
lmms_eval/tasks/vdc/_default_template_yaml (4 changes: 0 additions & 4 deletions)
@@ -3,10 +3,6 @@ dataset_kwargs:
   token: True
   video: True
   cache_dir: vdc_test
-lmms_eval_specific_kwargs:
-  default:
-    pre_prompt: ""
-    post_prompt: ""

 metadata:
   version: 0.0
lmms_eval/tasks/vdc/utils.py (10 changes: 5 additions & 5 deletions)
@@ -169,27 +169,27 @@ def vdc_doc_to_visual(doc):


 # format the prompt
-def vdc_doc_to_text_short(doc, model_specific_prompt_kwargs=None):
+def vdc_doc_to_text_short(doc, lmms_eval_specific_kwargs=None):
     pre_prompt = random.choice(SHORT_CAPTION_PROMPTS)
     return f"{pre_prompt}"


-def vdc_doc_to_text_detailed(doc, model_specific_prompt_kwargs=None):
+def vdc_doc_to_text_detailed(doc, lmms_eval_specific_kwargs=None):
     pre_prompt = random.choice(DETAILED_CAPTION_PROMPTS)
     return f"{pre_prompt}"


-def vdc_doc_to_text_main_object(doc, model_specific_prompt_kwargs=None):
+def vdc_doc_to_text_main_object(doc, lmms_eval_specific_kwargs=None):
     pre_prompt = random.choice(MAIN_OBJECT_CAPTION_PROMPTS)
     return f"{pre_prompt}"


-def vdc_doc_to_text_camera(doc, model_specific_prompt_kwargs=None):
+def vdc_doc_to_text_camera(doc, lmms_eval_specific_kwargs=None):
     pre_prompt = random.choice(CAMERA_CAPTION_PROMPTS)
     return f"{pre_prompt}"


-def vdc_doc_to_text_background(doc, model_specific_prompt_kwargs=None):
+def vdc_doc_to_text_background(doc, lmms_eval_specific_kwargs=None):
     pre_prompt = random.choice(BACKGROUND_CAPTION_PROMPTS)
     return f"{pre_prompt}"

