This repository has been archived by the owner on Aug 16, 2024. It is now read-only.

Commit 7d1c271
enhance offline experience
mikecovlee committed Jul 26, 2024
1 parent 8a904cd commit 7d1c271
Showing 3 changed files with 15 additions and 3 deletions.

evaluator.py (2 additions, 0 deletions)
@@ -7,6 +7,7 @@
 def main(
     base_model: str,
     task_name: str,
+    data_path: str = None,
     lora_weights: str = None,
     load_16bit: bool = True,
     load_8bit: bool = False,
@@ -39,6 +40,7 @@ def main(
     evaluate_paramas = mlora.EvaluateConfig(
         adapter_name=adapter_name,
         task_name=task_name,
+        data_path=data_path,
         batch_size=batch_size,
         router_profile=router_profile,
     )
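
Usage note: with data_path threaded through to mlora.EvaluateConfig, an evaluation can read its dataset from a local file instead of resolving it by task name over the network. A minimal sketch — the adapter name, task name, and file path below are illustrative assumptions, not part of the commit:

import mlora

config = mlora.EvaluateConfig(
    adapter_name="lora_0",         # assumed adapter name
    task_name="glue:cola",         # assumed task
    data_path="./data/cola.json",  # local dataset copy; avoids a download
    batch_size=16,
    router_profile=False,
)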

launch.py (3 additions, 1 deletion)
@@ -70,10 +70,10 @@ def gen_config(
     adapter_name: str = None,
     file_name: str = "mlora.json",
     data_path: str = None,
-    prompt_template: str = None,
     multi_task: bool = False,
     append: bool = False,
     # default value provided by template
+    prompt_template: str = None,
     cutoff_len: int = None,
     save_step: int = None,
     lr_scheduler: str = None,
@@ -185,8 +185,10 @@ def show_help():
     --tasks           task names separate by ';'
     --adapter_name    default is task name
     --file_name       default is 'mlora.json'
+    --data_path       path to input data
     --multi_task      multi-task training
     --append          append to existed config
+    --prompt_template
     --cutoff_len
     --save_step
     --warmup_steps
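
For illustration, the new --data_path flag could be passed alongside the relocated --prompt_template option when generating a config. A sketch assuming launch.py exposes gen_config as a "gen" subcommand — the subcommand name and both paths are assumptions:

import subprocess

# Generate an mlora.json that points at local data and a local prompt template.
subprocess.run([
    "python", "launch.py", "gen",
    "--tasks", "glue:cola",
    "--data_path", "./data/cola.json",
    "--prompt_template", "./templates/alpaca.json",
], check=True)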

mlora/tasks/common.py (10 additions, 2 deletions)
@@ -1,4 +1,5 @@
 import logging
+import os
 from typing import Any, Callable, Dict, List, Optional, Tuple
 
 import datasets as hf_datasets
@@ -22,11 +23,18 @@ def compute(self) -> Dict[str, Any]:
 class AutoMetric(BasicMetric):
     def __init__(self, task_name: str) -> None:
         super().__init__()
+        path_prefix = os.getenv("MLORA_METRIC_PATH")
+        if path_prefix is None:
+            path_prefix = ""
+
+        if not path_prefix.endswith(os.sep):
+            path_prefix += os.sep
+
         if ":" in task_name:
             split = task_name.split(":")
-            self.metric_ = hf_evaluate.load(split[0], split[1])
+            self.metric_ = hf_evaluate.load(path_prefix + split[0], split[1])
         else:
-            self.metric_ = hf_evaluate.load(task_name)
+            self.metric_ = hf_evaluate.load(path_prefix + task_name)
 
     def add_batch(self, predictions: torch.Tensor, references: torch.Tensor):
         self.metric_.add_batch(predictions=predictions, references=references)
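
Usage note: the new MLORA_METRIC_PATH environment variable lets AutoMetric load metric scripts from a local directory instead of the Hugging Face Hub. A sketch — the local directory is an assumption; it would hold the metric scripts, e.g. from a clone of https://github.com/huggingface/evaluate:

import os

# Must be set before AutoMetric is constructed; the loader prepends this
# prefix (plus os.sep) to the metric name.
os.environ["MLORA_METRIC_PATH"] = "/opt/evaluate/metrics"

from mlora.tasks.common import AutoMetric

# Resolves to hf_evaluate.load("/opt/evaluate/metrics/accuracy"),
# so no network access is needed.
metric = AutoMetric("accuracy")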
