diff --git a/opencompass/configs/datasets/aime2024/aime2024_0shot_nocot_genericllmeval_xml_gen_2b9dc2.py b/opencompass/configs/datasets/aime2024/aime2024_0shot_nocot_genericllmeval_xml_gen_2b9dc2.py new file mode 100644 index 000000000..1dfcde661 --- /dev/null +++ b/opencompass/configs/datasets/aime2024/aime2024_0shot_nocot_genericllmeval_xml_gen_2b9dc2.py @@ -0,0 +1,95 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import Aime2024Dataset, MATHEvaluator, math_postprocess_v2 +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.utils import xml_tag_postprocessor + +aime2024_reader_cfg = dict( + input_columns=['question'], + output_column='answer' +) + + +aime2024_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{question}\nRemember to put your final answer within \\boxed{}.'), + ], + ) + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=2048) +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + + : \n{question}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +aime2024_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + ), + pred_role='BOT', +) + +aime2024_datasets = [ + dict( + abbr='aime2024', + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + infer_cfg=aime2024_infer_cfg, + eval_cfg=aime2024_eval_cfg, + mode='singlescore', + ) +] \ No newline at end of file diff --git a/opencompass/configs/datasets/cmmlu/cmmlu_stem_0shot_nocot_xml_gen_3653db.py b/opencompass/configs/datasets/cmmlu/cmmlu_stem_0shot_nocot_xml_gen_3653db.py new file mode 100644 index 000000000..ab8b62e04 --- /dev/null +++ b/opencompass/configs/datasets/cmmlu/cmmlu_stem_0shot_nocot_xml_gen_3653db.py @@ -0,0 +1,141 @@ +""" +Setting: 0-shot No-CoT +Evaluator: GenericLLMEvaluator +""" +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_evaluator import AccEvaluator +from opencompass.datasets import CMMLUDataset +from opencompass.utils.text_postprocessors import match_answer_pattern +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.utils import xml_tag_postprocessor + +cmmlu_subject_mapping = { + 'anatomy': '解剖学', + 'astronomy': '天文学', + 'college_actuarial_science': '大学精算学', + 'college_engineering_hydrology': '大学工程水文学', + 'college_mathematics': '大学数学', + 'college_medical_statistics': '大学医学统计', + 'computer_science': '计算机科学', + 'conceptual_physics': '概念物理学', + 'electrical_engineering': '电气工程', + 'elementary_mathematics': '初等数学', + 'genetics': '遗传学', + 'high_school_biology': '高中生物', + 'high_school_chemistry': '高中化学', + 'high_school_mathematics': '高中数学', + 'high_school_physics': '高中物理学', + 'machine_learning': '机器学习', + 'virology': '病毒学', +} + +QUERY_TEMPLATE = """ +你回答的最后一行**必须**是以下格式 '答案: $选项' (不带引号), 其中选项是ABCD之一. + +{question} + +A) {A} +B) {B} +C) {C} +D) {D} +""".strip() + + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. 
Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + : \n {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + Judging the correctness of candidates' answers: +""".strip() + +cmmlu_all_sets = list(cmmlu_subject_mapping.keys()) + +cmmlu_datasets = [] +for _name in cmmlu_all_sets: + _ch_name = cmmlu_subject_mapping[_name] + prompt_prefix = f'请回答以下关于{_ch_name}的单项选择题, ' + cmmlu_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=prompt_prefix+QUERY_TEMPLATE), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + cmmlu_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=CMMLUDataset, + path='opencompass/cmmlu', + name=_name, + abbr=f'cmmlu-{_name}', + reader_cfg=dict( + input_columns=['question', 'A', 'B', 'C', 'D'], + output_column='answer', + train_split='dev', + test_split='test'), + ), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + judge_cfg=dict(), + ), + pred_role='BOT', + ) + cmmlu_datasets.append( + dict( + type=CMMLUDataset, + path='opencompass/cmmlu', + name=_name, + abbr=f'cmmlu-{_name}', + reader_cfg=dict( + input_columns=['question', 'A', 'B', 'C', 'D'], + output_column='answer', + train_split='dev', + test_split='test'), + infer_cfg=cmmlu_infer_cfg, + eval_cfg=cmmlu_eval_cfg, + mode='singlescore', + )) + +del _name, _ch_name diff --git a/opencompass/configs/datasets/gpqa/gpqa_0shot_nocot_genericllmeval_xml_gen_772ea0.py b/opencompass/configs/datasets/gpqa/gpqa_0shot_nocot_genericllmeval_xml_gen_772ea0.py new file mode 100644 index 000000000..372976a6f --- /dev/null +++ b/opencompass/configs/datasets/gpqa/gpqa_0shot_nocot_genericllmeval_xml_gen_772ea0.py @@ 
-0,0 +1,112 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import GPQADataset, GPQA_Simple_Eval_postprocess +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.utils import xml_tag_postprocessor + +# openai_simple_eval prompt +align_prompt = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. + +{question} + +A) {A} +B) {B} +C) {C} +D) {D} +""".strip() + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + : {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + Judging the correctness of candidates' answers: +""".strip() + + +gpqa_reader_cfg = dict( + input_columns=['question', 'A', 'B', 'C', 'D'], + output_column='answer') + +gpqa_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=align_prompt), + ], )), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer)) + + + +gpqa_datasets = [] +gpqa_subsets = { + # 'extended': 'gpqa_extended.csv', + # 'main': 'gpqa_main.csv', + 'diamond': 'gpqa_diamond.csv' +} + +for split in list(gpqa_subsets.keys()): + gpqa_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=GPQADataset, + path='./data/gpqa/', + name=gpqa_subsets[split], + reader_cfg=gpqa_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + ), + + pred_role='BOT', + ) + gpqa_datasets.append( + dict( + abbr='GPQA_' + split, + type=GPQADataset, + path='./data/gpqa/', + name=gpqa_subsets[split], + reader_cfg=gpqa_reader_cfg, + infer_cfg=gpqa_infer_cfg, + eval_cfg=gpqa_eval_cfg, + mode='singlescore', + ) + ) diff --git a/opencompass/configs/datasets/korbench/korbench_single_0shot_genericllmeval_xml_gen_17854d.py b/opencompass/configs/datasets/korbench/korbench_single_0shot_genericllmeval_xml_gen_17854d.py new file mode 100644 index 000000000..99e0b9c2f --- /dev/null +++ b/opencompass/configs/datasets/korbench/korbench_single_0shot_genericllmeval_xml_gen_17854d.py @@ -0,0 +1,119 @@ +from opencompass.datasets.korbench.korbench import korbenchDataset, korbenchEvaluator +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.utils import xml_tag_postprocessor + +categories = ["cipher", "counterfactual", "logic", "operation", "puzzle"] + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. 
Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{prompt}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +korbench_0shot_single_datasets = [] + +for category in categories: + # Prompt template + prompt_template = dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role="HUMAN", + prompt="" + ) + ], + round=[ + dict( + role="HUMAN", + prompt="{prompt}" # f-string + ) + ] + ) + ) + + # Reader configuration + reader_cfg = dict( + input_columns=["prompt"], + output_column="answer", + ) + + # Inference configuration + infer_cfg = dict( + prompt_template=prompt_template, + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=1024), + ) + + # Evaluation configuration + eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=korbenchDataset, + path="opencompass/korbench", + prompt_mode='0_shot', + category=category, + reader_cfg=reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + ), + pred_role='BOT', + ) + + # Dataset + korbench_dataset = dict( + type=korbenchDataset, + abbr=f"korbench_{category}", + path="opencompass/korbench", + prompt_mode='0_shot', + category=category, + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, + mode='singlescore', + ) + + korbench_0shot_single_datasets.append(korbench_dataset) diff --git a/opencompass/configs/datasets/livereasonbench/livereasonbench_genericllmeval_xml_gen_f990de.py b/opencompass/configs/datasets/livereasonbench/livereasonbench_genericllmeval_xml_gen_f990de.py new file mode 100644 index 000000000..7127dc266 --- /dev/null +++ b/opencompass/configs/datasets/livereasonbench/livereasonbench_genericllmeval_xml_gen_f990de.py @@ -0,0 +1,144 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import 
ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer + +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import LiveReasonBenchDataset, livereasonbench_postprocess +from opencompass.utils import xml_tag_postprocessor + + +GRADER_TEMPLATE = """ +Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. +First, I will give examples of each grade, and then you will grade a new example. + + +The following are examples of CORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia Obama and Sasha Obama +Predicted answer 1: sasha and malia obama +Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check +Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. +``` +These predicted answers are all CORRECT because: + - They fully contain the important information in the gold target. + - They do not contain any information that contradicts the gold target. + - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. + - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. + + +The following are examples of INCORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: Malia. +Predicted answer 2: Malia, Sasha, and Susan. +Predicted answer 3: Barack Obama does not have any children. +Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. +Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. +Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? +Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. +``` +These predicted answers are all INCORRECT because: + - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. + + +The following are examples of NOT_ATTEMPTED predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: I don't know. +Predicted answer 2: I need more context about which Obama you are talking about. +Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. +Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. +``` +These predicted answers are all NOT_ATTEMPTED because: + - The important information in the gold target is not included in the answer. + - No statements in the answer contradict the gold target. 
+ + +Also note the following things: +- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". + - Predicted answers "120k", "124k", and 115k" are all CORRECT. + - Predicted answers "100k" and "113k" are INCORRECT. + - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. +- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. + - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. +- Do not punish predicted answers if they omit information that would be clearly inferred from the question. + - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". + - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. + - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. + - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. +- Do not punish for typos in people's name if it's clearly the same name. + - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". + +Grade the predicted answer of this new question as one of: +A: CORRECT +B: INCORRECT +C: NOT_ATTEMPTED +Just return the letters "A", "B", or "C", with no text around it. + +Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+``` +Question: {question} +Gold target: {answer} +Predicted answer: {prediction} +``` +""".strip() + +livereasonbench_reader_cfg = dict(input_columns=['question'], output_column='answer') + +livereasonbench_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt="Question: {question}\n"), + ], + )), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=16384)) + +livereasonbench_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=LiveReasonBenchDataset, + path='opencompass/LiveReasonBench', + reader_cfg=livereasonbench_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=livereasonbench_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + ), + pred_role='BOT', +) + +livereasonbench_datasets = [ + dict( + abbr='LiveReasonBench-20241202', + type=LiveReasonBenchDataset, + path='opencompass/LiveReasonBench', + reader_cfg=livereasonbench_reader_cfg, + infer_cfg=livereasonbench_infer_cfg, + eval_cfg=livereasonbench_eval_cfg, + version='livereasonbench-20241202', + mode='singlescore', + ) +] diff --git a/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_gen_2e6d10.py b/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_gen_2e6d10.py new file mode 100644 index 000000000..50e5ee01c --- /dev/null +++ b/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_gen_2e6d10.py @@ -0,0 +1,152 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_evaluator import LMEvaluator +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import LiveStemBenchDataset, livereasonbench_postprocess + + +GRADER_TEMPLATE = """ +Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. +First, I will give examples of each grade, and then you will grade a new example. + + +The following are examples of CORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia Obama and Sasha Obama +Predicted answer 1: sasha and malia obama +Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check +Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. +``` +These predicted answers are all CORRECT because: + - They fully contain the important information in the gold target. + - They do not contain any information that contradicts the gold target. + - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. + - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. 
+ + +The following are examples of INCORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: Malia. +Predicted answer 2: Malia, Sasha, and Susan. +Predicted answer 3: Barack Obama does not have any children. +Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. +Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. +Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? +Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. +``` +These predicted answers are all INCORRECT because: + - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. + + +The following are examples of NOT_ATTEMPTED predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: I don't know. +Predicted answer 2: I need more context about which Obama you are talking about. +Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. +Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. +``` +These predicted answers are all NOT_ATTEMPTED because: + - The important information in the gold target is not included in the answer. + - No statements in the answer contradict the gold target. + + +Also note the following things: +- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". + - Predicted answers "120k", "124k", and 115k" are all CORRECT. + - Predicted answers "100k" and "113k" are INCORRECT. + - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. +- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. + - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. +- Do not punish predicted answers if they omit information that would be clearly inferred from the question. + - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". + - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". 
The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. + - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. + - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. +- Do not punish for typos in people's name if it's clearly the same name. + - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". + +Grade the predicted answer of this new question as one of: +A: CORRECT +B: INCORRECT +C: NOT_ATTEMPTED +Just return the letters "A", "B", or "C", with no text around it. + +Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. +``` +Question: {question} +Gold target: {answer} +Predicted answer: {prediction} +``` +""".strip() + +livereasonbench_subsets = { + 'biology': 'livestembench_bio', + 'chemistry': 'livestembench_che', + 'physics': 'livestembench_phy', +} + +livestembench_datasets = [] + +for name, subset in livereasonbench_subsets.items(): + livereasonbench_reader_cfg = dict(input_columns=['question'], output_column='answer') + + livereasonbench_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt="问题: {question}\n 请回答这道问题"), + ], + )), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=8192)) + + livereasonbench_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=LiveStemBenchDataset, + path='opencompass/livestembench', + reader_cfg=livereasonbench_reader_cfg, + version=subset, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=livereasonbench_postprocess), + ), + pred_role='BOT', + ) + + livestembench_datasets.append( + dict( + abbr=f'LiveStemBench-{name}', + type=LiveStemBenchDataset, + path='opencompass/livestembench', + reader_cfg=livereasonbench_reader_cfg, + infer_cfg=livereasonbench_infer_cfg, + eval_cfg=livereasonbench_eval_cfg, + version=subset, + mode='singlescore', + ) + ) diff --git a/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_xml_gen_2e6d10.py b/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_xml_gen_2e6d10.py new file mode 100644 index 000000000..a30cadd3d --- /dev/null +++ b/opencompass/configs/datasets/livestembench/livestembench_0shot_noncot_xml_gen_2e6d10.py @@ -0,0 +1,155 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_evaluator import LMEvaluator +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import LiveStemBenchDataset, livereasonbench_postprocess +from opencompass.utils import 
xml_tag_postprocessor + + +GRADER_TEMPLATE = """ +Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. +First, I will give examples of each grade, and then you will grade a new example. + + +The following are examples of CORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia Obama and Sasha Obama +Predicted answer 1: sasha and malia obama +Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check +Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. +``` +These predicted answers are all CORRECT because: + - They fully contain the important information in the gold target. + - They do not contain any information that contradicts the gold target. + - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. + - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. + + +The following are examples of INCORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: Malia. +Predicted answer 2: Malia, Sasha, and Susan. +Predicted answer 3: Barack Obama does not have any children. +Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. +Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. +Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? +Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. +``` +These predicted answers are all INCORRECT because: + - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. + + +The following are examples of NOT_ATTEMPTED predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: I don't know. +Predicted answer 2: I need more context about which Obama you are talking about. +Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. +Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. +``` +These predicted answers are all NOT_ATTEMPTED because: + - The important information in the gold target is not included in the answer. + - No statements in the answer contradict the gold target. + + +Also note the following things: +- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". 
+ - Predicted answers "120k", "124k", and 115k" are all CORRECT. + - Predicted answers "100k" and "113k" are INCORRECT. + - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. +- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. + - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. +- Do not punish predicted answers if they omit information that would be clearly inferred from the question. + - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". + - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. + - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. + - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. +- Do not punish for typos in people's name if it's clearly the same name. + - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". + +Grade the predicted answer of this new question as one of: +A: CORRECT +B: INCORRECT +C: NOT_ATTEMPTED +Just return the letters "A", "B", or "C", with no text around it. + +Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+``` +Question: {question} +Gold target: {answer} +Predicted answer: {prediction} +``` +""".strip() + +livereasonbench_subsets = { + 'biology': 'livestembench_bio', + 'chemistry': 'livestembench_che', + 'physics': 'livestembench_phy', +} + +livestembench_datasets = [] + +for name, subset in livereasonbench_subsets.items(): + livereasonbench_reader_cfg = dict(input_columns=['question'], output_column='answer') + + livereasonbench_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt="问题: {question}\n 请回答这道问题"), + ], + )), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=8192)) + + livereasonbench_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=LiveStemBenchDataset, + path='opencompass/livestembench', + reader_cfg=livereasonbench_reader_cfg, + version=subset, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=livereasonbench_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + + ), + pred_role='BOT', + ) + + livestembench_datasets.append( + dict( + abbr=f'LiveStemBench-{name}', + type=LiveStemBenchDataset, + path='opencompass/livestembench', + reader_cfg=livereasonbench_reader_cfg, + infer_cfg=livereasonbench_infer_cfg, + eval_cfg=livereasonbench_eval_cfg, + version=subset, + mode='singlescore', + ) + ) diff --git a/opencompass/configs/datasets/livestembench/livestembench_gen_3e3c50.py b/opencompass/configs/datasets/livestembench/livestembench_gen_3e3c50.py index 1c95a4c9c..bf6c04bfd 100644 --- a/opencompass/configs/datasets/livestembench/livestembench_gen_3e3c50.py +++ b/opencompass/configs/datasets/livestembench/livestembench_gen_3e3c50.py @@ -2,6 +2,7 @@ from opencompass.openicl.icl_retriever import ZeroRetriever from opencompass.openicl.icl_inferencer import GenInferencer from opencompass.openicl.icl_evaluator import LMEvaluator +from opencompass.evaluator import GenericLLMEvaluator from opencompass.datasets import LiveStemBenchDataset, livereasonbench_postprocess @@ -108,7 +109,7 @@ livereasonbench_eval_cfg = dict( evaluator=dict( - type=LMEvaluator, + type=GenericLLMEvaluator, prompt_template=dict( type=PromptTemplate, template=dict( diff --git a/opencompass/configs/datasets/math/math_prm800k_500_0shot_nocot_genericllmeval_xml_gen_63a000.py b/opencompass/configs/datasets/math/math_prm800k_500_0shot_nocot_genericllmeval_xml_gen_63a000.py new file mode 100644 index 000000000..b4fc279da --- /dev/null +++ b/opencompass/configs/datasets/math/math_prm800k_500_0shot_nocot_genericllmeval_xml_gen_63a000.py @@ -0,0 +1,99 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets import MATHDataset +from opencompass.utils import xml_tag_postprocessor + + +# ----------------------------- Detailed Config ----------------------------- + +math_reader_cfg = dict(input_columns=['problem'], output_column='solution') + +math_infer_cfg = dict( + 
prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'), + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=8192), +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + + : \n{problem}\n\n\n + : \n{solution}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +# Evaluation configuration +math_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=MATHDataset, + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + + ), + pred_role='BOT', +) + + +math_datasets = [ + dict( + type=MATHDataset, + abbr='math_prm800k_500-llmjudge', + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + infer_cfg=math_infer_cfg, + eval_cfg=math_eval_cfg, + mode='singlescore', + ) +] diff --git a/opencompass/configs/datasets/mmlu/mmlu_stem_0shot_xml_gen_216503.py b/opencompass/configs/datasets/mmlu/mmlu_stem_0shot_xml_gen_216503.py new file mode 100644 index 000000000..c3595c101 --- /dev/null +++ b/opencompass/configs/datasets/mmlu/mmlu_stem_0shot_xml_gen_216503.py @@ -0,0 +1,116 @@ +""" +Setting: 0-shot No-CoT +Evaluator: GenericLLMEvaluator +""" +from mmengine.config import read_base +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_evaluator import AccEvaluator +from opencompass.datasets import MMLUDataset +from opencompass.utils.text_postprocessors import match_answer_pattern, xml_tag_postprocessor +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess + +with read_base(): + # from .....configs.datasets.mmlu.mmlu_all_sets import mmlu_all_sets + from .mmlu_stem_sets import mmlu_all_sets +# None of the mmlu dataset in huggingface is correctly parsed, so we use our own dataset reader +# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar + +QUERY_TEMPLATE = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. + +{input} + +A) {A} +B) {B} +C) {C} +D) {D} +""".strip() + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. 
Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + : {input}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + Judging the correctness of candidates' answers: +""".strip() + +mmlu_reader_cfg = dict( + input_columns=['input', 'A', 'B', 'C', 'D'], + output_column='target', + train_split='dev') + +mmlu_datasets = [] +for name in mmlu_all_sets: + mmlu_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=QUERY_TEMPLATE), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + mmlu_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + abbr=f'lukaemon_mmlu_{name}', + type=MMLUDataset, + path='opencompass/mmlu', + name=name, + reader_cfg=mmlu_reader_cfg, + ), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + pred_postprocessor=dict(type=xml_tag_postprocessor, tag=""), + judge_cfg=dict(), + ), + ) + mmlu_datasets.append( + dict( + abbr=f'lukaemon_mmlu_{name}', + type=MMLUDataset, + path='opencompass/mmlu', + name=name, + reader_cfg=mmlu_reader_cfg, + infer_cfg=mmlu_infer_cfg, + eval_cfg=mmlu_eval_cfg, + mode='singlescore', + )) diff --git a/opencompass/datasets/livemathbench/livemathbench.py b/opencompass/datasets/livemathbench/livemathbench.py index 9d6ac63be..d2b4b93b5 100644 --- a/opencompass/datasets/livemathbench/livemathbench.py +++ b/opencompass/datasets/livemathbench/livemathbench.py @@ -47,8 +47,7 @@ def load(path: str, if path != '': path = get_data_path(path) - head, tail = os.path.split(path) - path = os.path.join(head, f'{tail}-{version}') + path = os.path.join(path, version) for split, language in product(dataset_splits, dataset_languages): dataset_info[f'{split}_{language}'] = { 'single-choice': 0, @@ -65,8 +64,11 @@ def load(path: str, if 
path != '': file_path = os.path.join(path, f'{split}_{language}.jsonl') + if not os.path.exists(file_path): - continue + raise FileNotFoundError( + f'File {file_path} does not exist, please check the ' + f'path and try again.') examples = [] with jsonlines.open(file_path, 'r') as file: for example in file: diff --git a/opencompass/datasets/livereasonbench/livereasonbench.py b/opencompass/datasets/livereasonbench/livereasonbench.py index d8886d50b..0051749bd 100644 --- a/opencompass/datasets/livereasonbench/livereasonbench.py +++ b/opencompass/datasets/livereasonbench/livereasonbench.py @@ -161,7 +161,7 @@ def get_final_results(judged_answers, references, origial_responses): accuracy_given_attempted + is_correct) if (accuracy_given_attempted + is_correct) > 0 else 0 result = { - 'accuracy_given_attempted': accuracy_given_attempted, + 'accuracy_given_attempted': accuracy_given_attempted * 100, 'f1': f1, 'details': details } diff --git a/opencompass/evaluator/generic_llm_evaluator.py b/opencompass/evaluator/generic_llm_evaluator.py index c248b8ece..131c2e757 100644 --- a/opencompass/evaluator/generic_llm_evaluator.py +++ b/opencompass/evaluator/generic_llm_evaluator.py @@ -7,7 +7,8 @@ from opencompass.openicl.icl_evaluator import BaseEvaluator from opencompass.openicl.icl_inferencer import GenInferencer from opencompass.openicl.icl_retriever import ZeroRetriever -from opencompass.registry import DICT_POSTPROCESSORS, ICL_PROMPT_TEMPLATES +from opencompass.registry import (DICT_POSTPROCESSORS, ICL_PROMPT_TEMPLATES, + TEXT_POSTPROCESSORS) from opencompass.utils import build_dataset_from_cfg, build_model_from_cfg from opencompass.utils.logging import get_logger @@ -82,6 +83,8 @@ def score( self.build_inferencer() # ---------------- Process Predictions ------------------ + predictions = self.pred_postprocess(predictions) + # For Single Round Dialogue prediction_dict = {} prediction_dict['prediction'] = predictions @@ -119,9 +122,17 @@ def score( prompt_template=self.prompt_template) output = mmengine.load(self.output_path) - return self.postprocess(output) + return self.output_postprocess(output) + + def pred_postprocess(self, predictions: List) -> Dict: + if self.pred_postprocessor is None: + return predictions + else: + kwargs = self.pred_postprocessor + proc = TEXT_POSTPROCESSORS.get(kwargs.pop('type')) + return [proc(pred, **kwargs) for pred in predictions] - def postprocess(self, output: Dict) -> Dict: + def output_postprocess(self, output: Dict) -> Dict: """Postprocess output by adding necessary statistics or data into it.""" if self.dict_postprocessor is None: diff --git a/opencompass/utils/datasets.py b/opencompass/utils/datasets.py index 4c66b8f72..dc1e237d9 100644 --- a/opencompass/utils/datasets.py +++ b/opencompass/utils/datasets.py @@ -8,7 +8,7 @@ def get_data_path(dataset_id: str, local_mode: bool = False): - """return dataset id when getting data from ModelScope repo, otherwise just + """return dataset id when getting data from ModelScope/HuggingFace repo, otherwise just return local path as is. 
Args: diff --git a/opencompass/utils/datasets_info.py b/opencompass/utils/datasets_info.py index 4fa441310..10749c589 100644 --- a/opencompass/utils/datasets_info.py +++ b/opencompass/utils/datasets_info.py @@ -368,6 +368,11 @@ "hf_id": "", "local": "./data/LiveMathBench/", }, + "opencompass/LiveMathBench": { + "ms_id": "", + "hf_id": "opencompass/LiveMathBench", + "local": "./data/LiveMathBench/", + }, "opencompass/LiveReasonBench": { "ms_id": "", "hf_id": "", @@ -634,7 +639,7 @@ }, "LiveMathBench": { 'url': "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/LiveMathBench.zip", - "md5": "789df4604260d5cf3ba7a891077cf6a0", + "md5": "d0781f9185c9bb50e81e6e3ca8c59013", }, "bigcodebench": { "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/bigcodebench.zip", diff --git a/opencompass/utils/text_postprocessors.py b/opencompass/utils/text_postprocessors.py index 16083a0b3..eb7469ab0 100644 --- a/opencompass/utils/text_postprocessors.py +++ b/opencompass/utils/text_postprocessors.py @@ -171,6 +171,34 @@ def multiple_select_postprocess(text: str) -> str: return ''.join(sorted(ret)) +@TEXT_POSTPROCESSORS.register_module('specific-xml-tag') +def xml_tag_postprocessor(text, tag): + """Extracts content enclosed within a specified XML-style tag from a + string. + + Args: + text: The input string containing XML-style tags. + tag: The opening XML-style tag to extract content from (e.g., "<answer>"). Must include the angle brackets. + + Returns: + The content enclosed within the specified tag, or 'NO ANSWER FOUND' if the tag is not found. + """ + + # Derive the matching closing tag from the opening tag, e.g. '<answer>' -> '</answer>'. + closing_tag = tag.replace('<', '</', 1) + # Use a regular expression to find the content within the specified tag. This handles cases where the tag might appear multiple times. + matches = re.findall( + rf'{tag}(.*?){closing_tag}', text, + re.DOTALL) # re.DOTALL allows . to match newline characters + + if matches: + # Only keep the last one + output = matches[-1].strip( + ) # Extract the content and remove leading/trailing whitespace + else: + output = 'NO ANSWER FOUND' + + return output + + def general_eval_wrapper_postprocess(text: str, postprocess: Optional[Union[ str, Callable]] = None,
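
For reference, a minimal usage sketch of the new `specific-xml-tag` postprocessor follows. The tag name `<answer>` is only an illustration (the configs above pass their own tag via `tag=...`, which is not reproduced here), and the behaviour shown assumes the implementation added in opencompass/utils/text_postprocessors.py: only the last tagged block is kept, and a sentinel string is returned when no tag pair is found.

    # Hypothetical usage of the new postprocessor; '<answer>' is an illustrative tag.
    from opencompass.utils import xml_tag_postprocessor

    raw_prediction = (
        'Step-by-step reasoning...\n'
        '<answer>C</answer>\n'
        'Wait, let me reconsider.\n'
        '<answer>B</answer>'
    )

    # Only the content of the last matching tag pair is kept.
    print(xml_tag_postprocessor(raw_prediction, tag='<answer>'))   # -> 'B'

    # Without any tag pair, the sentinel string is returned.
    print(xml_tag_postprocessor('no tags here', tag='<answer>'))   # -> 'NO ANSWER FOUND'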
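
The evaluator-side wiring added to GenericLLMEvaluator can be summarised with the standalone sketch below. It mirrors the new `pred_postprocess` hook: the `pred_postprocessor` dict from the dataset configs is resolved to a callable and mapped over the raw predictions before the judge prompts are built. The registry lookup is replaced by a plain dict and the helper names are hypothetical, so the sketch runs on its own and is not the exact OpenCompass code.

    import re
    from typing import Dict, List, Optional

    def strip_to_xml_tag(text: str, tag: str) -> str:
        # Same idea as xml_tag_postprocessor: keep only the last tagged block.
        closing_tag = tag.replace('<', '</', 1)
        matches = re.findall(rf'{tag}(.*?){closing_tag}', text, re.DOTALL)
        return matches[-1].strip() if matches else 'NO ANSWER FOUND'

    # Stand-in for the TEXT_POSTPROCESSORS registry used in the real code.
    LOCAL_POSTPROCESSORS = {'specific-xml-tag': strip_to_xml_tag}

    def pred_postprocess(predictions: List[str],
                         pred_postprocessor: Optional[Dict] = None) -> List[str]:
        # No postprocessor configured: pass the predictions through unchanged.
        if pred_postprocessor is None:
            return predictions
        kwargs = dict(pred_postprocessor)  # copy so the config dict is not mutated
        proc = LOCAL_POSTPROCESSORS[kwargs.pop('type')]
        return [proc(pred, **kwargs) for pred in predictions]

    preds = ['reasoning ... <answer>B</answer>', 'no tag in this one']
    print(pred_postprocess(preds, dict(type='specific-xml-tag', tag='<answer>')))
    # -> ['B', 'NO ANSWER FOUND']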