add mistral asset TR #353

Merged: 1 commit, Sep 10, 2024
61 changes: 61 additions & 0 deletions assets/tr/QA/MultiNativQA_Mistral_7b_ZeroShot.py
@@ -0,0 +1,61 @@
import json

from llmebench.datasets import MultiNativQADataset
from llmebench.models import AzureModel
from llmebench.tasks import MultiNativQATask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "Mistral 7b",
        "description": "Deployed on Azure.",
        "scores": {},
    }


def config():
    return {
        "dataset": MultiNativQADataset,
        "task": MultiNativQATask,
        "model": AzureModel,
        "general_args": {"test_split": "turkish_tr"},
    }
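

# Illustrative sketch (not part of the asset): a quick sanity check that Azure
# credentials are present before the runner instantiates AzureModel. The variable
# names AZURE_API_URL and AZURE_API_KEY are assumptions about what the wrapper
# reads; confirm them against llmebench.models.AzureModel before relying on this.
if __name__ == "__main__":
    import os

    missing = [v for v in ("AZURE_API_URL", "AZURE_API_KEY") if not os.getenv(v)]
    if missing:
        print(f"Missing Azure credentials: {', '.join(missing)}")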


def prompt(input_sample):
    # Define the question prompt
    question_prompt = f"""
    Please use your expertise to answer the following Turkish question. Answer in Turkish and rate your confidence level from 1 to 10.
    Provide your response in the following JSON format: {{"answer": "your answer", "score": your confidence score}}.
    Please provide JSON output only. No additional text. Answer should be limited to at most {input_sample['length']} words.

    Question: {input_sample['question']}
    """

    # Define the assistant prompt (currently unused; only the user turn is sent)
    assistant_prompt = """
    You are a Turkish AI assistant specialized in providing detailed and accurate answers across various fields.
    Your task is to deliver clear, concise, and relevant information.
    """

    return [
        # {
        #     "role": "assistant",
        #     "content": assistant_prompt,
        # },
        {
            "role": "user",
            "content": question_prompt,
        },
    ]
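

# Illustrative usage sketch (not part of the asset): shows the chat payload that
# prompt() produces for a hypothetical sample. The "question" and "length" keys are
# the ones prompt() reads; the values below are made up for demonstration.
if __name__ == "__main__":
    sample = {"question": "Türkiye'nin başkenti neresidir?", "length": 5}
    for message in prompt(sample):
        print(message["role"])     # -> "user"
        print(message["content"])  # JSON-only instructions followed by the question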


def post_process(response):
    data = response["output"]
    # Keep only the first paragraph in case extra text follows the JSON object
    if "\n\n" in data:
        data = data.split("\n\n")[0]
    # Parse the JSON payload and return only the answer text
    response = json.loads(data)
    answer = response["answer"]
    return answer
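

# Illustrative sketch (not part of the asset): a more defensive variant of
# post_process() that also strips a Markdown code fence and returns None instead of
# raising when the model output is not valid JSON. The name post_process_safe and
# its behaviour are assumptions for illustration only.
def post_process_safe(response):
    data = response["output"].strip()
    # Drop a ```json ... ``` fence if the model wrapped its answer in one
    if data.startswith("```"):
        data = data.strip("`").strip()
        if data.lower().startswith("json"):
            data = data[4:].strip()
    # Keep only the first paragraph in case extra text follows the JSON object
    if "\n\n" in data:
        data = data.split("\n\n")[0]
    try:
        return json.loads(data)["answer"]
    except (json.JSONDecodeError, KeyError):
        return None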