
Commit ca78817
Revert "Add Programming to Foundry (#441)"
This reverts commit 52a3500.
rishab-partha authored Aug 24, 2023
1 parent 52a3500 commit ca78817
Showing 10 changed files with 4 additions and 546 deletions.
4 changes: 1 addition & 3 deletions llmfoundry/models/hf/hf_causal_lm.py
@@ -8,8 +8,7 @@

 # required for loading a python model into composer
 import transformers
-from composer.metrics.nlp import (InContextLearningCodeEvalAccuracy,
-                                  InContextLearningLMAccuracy,
+from composer.metrics.nlp import (InContextLearningLMAccuracy,
                                   InContextLearningLMExpectedCalibrationError,
                                   InContextLearningMCExpectedCalibrationError,
                                   InContextLearningMultipleChoiceAccuracy,
@@ -75,7 +74,6 @@ def __init__(
             InContextLearningLMAccuracy(),
             InContextLearningMultipleChoiceAccuracy(),
             InContextLearningQAAccuracy(),
-            InContextLearningCodeEvalAccuracy(),
             InContextLearningLMExpectedCalibrationError(),
             InContextLearningMCExpectedCalibrationError()
         ]
4 changes: 1 addition & 3 deletions llmfoundry/models/mpt/modeling_mpt.py
@@ -13,8 +13,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from composer.metrics import (InContextLearningCodeEvalAccuracy,
-                              InContextLearningLMAccuracy,
+from composer.metrics import (InContextLearningLMAccuracy,
                               InContextLearningLMExpectedCalibrationError,
                               InContextLearningMCExpectedCalibrationError,
                               InContextLearningMultipleChoiceAccuracy,
@@ -699,7 +698,6 @@ def __init__(
             InContextLearningLMAccuracy(),
             InContextLearningMultipleChoiceAccuracy(),
             InContextLearningQAAccuracy(),
-            InContextLearningCodeEvalAccuracy(),
             InContextLearningLMExpectedCalibrationError(),
             InContextLearningMCExpectedCalibrationError(),
         ]
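Both llmfoundry/models/hf/hf_causal_lm.py and llmfoundry/models/mpt/modeling_mpt.py end up with the same default in-context-learning metric list after this revert. The sketch below is illustrative only (the variable name eval_metrics is assumed, not the exact Foundry constructor code): with InContextLearningCodeEvalAccuracy removed, code-generation evals such as HumanEval are no longer measured by default and the metric would have to be attached manually if it were added back.

from composer.metrics.nlp import (InContextLearningLMAccuracy,
                                  InContextLearningLMExpectedCalibrationError,
                                  InContextLearningMCExpectedCalibrationError,
                                  InContextLearningMultipleChoiceAccuracy,
                                  InContextLearningQAAccuracy)

# Default ICL eval metrics after the revert; the code-eval metric is gone.
eval_metrics = [
    InContextLearningLMAccuracy(),
    InContextLearningMultipleChoiceAccuracy(),
    InContextLearningQAAccuracy(),
    InContextLearningLMExpectedCalibrationError(),
    InContextLearningMCExpectedCalibrationError(),
]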
5 changes: 0 additions & 5 deletions llmfoundry/utils/builders.py
@@ -174,8 +174,6 @@ def _validate_cfg(icl_cfg: DictConfig):
             ]
         elif icl_cfg.icl_task_type == 'question_answering':
             icl_cfg.metric_names = ['InContextLearningQAAccuracy']
-        elif icl_cfg.icl_task_type == 'code_evaluation':
-            icl_cfg.metric_names = ['InContextLearningCodeEvalAccuracy']
         else:
             raise ValueError(
                 f'No metric_names defined, unable to build default metrics for icl_task_type={icl_cfg.icl_task_type}.'
@@ -191,8 +189,6 @@ def _validate_cfg(icl_cfg: DictConfig):
             icl_cfg.max_seq_len = default_max_seq_len
         if 'batch_size' not in icl_cfg:
             icl_cfg.batch_size = default_batch_size
-        if 'num_beams' not in icl_cfg:
-            icl_cfg.num_beams = 1

     for icl_cfg in icl_tasks_list:
         _validate_cfg(icl_cfg)
@@ -222,7 +218,6 @@ def _validate_cfg(icl_cfg: DictConfig):
                 example_delimiter=icl_cfg.example_delimiter,
                 continuation_delimiter=icl_cfg.continuation_delimiter,
                 destination_path=destination_path,
-                generations_per_sample=icl_cfg.num_beams,
                 has_categories=icl_cfg.get('has_categories', False),
             )
         if hasattr(
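To illustrate the builders.py change, here is a minimal, assumed sketch of the kind of ICL task entry the removed branch handled. Only icl_task_type, num_beams, and the dataset path appear in this diff; the other fields (label, dataset_uri, num_fewshot) are hypothetical placeholders. With the revert applied, _validate_cfg no longer maps 'code_evaluation' to a default metric and no longer defaults num_beams, so an entry like this would now fall through to the ValueError branch.

from omegaconf import OmegaConf

# Hypothetical icl_tasks entry; field names beyond icl_task_type and
# num_beams are assumptions for illustration, not taken from this diff.
icl_cfg = OmegaConf.create({
    'label': 'human_eval',
    'dataset_uri': 'scripts/eval/local_data/programming/human_eval.jsonl',
    'num_fewshot': [0],
    'icl_task_type': 'code_evaluation',
    # Before the revert, num_beams defaulted to 1 and was forwarded to the
    # dataloader as generations_per_sample; that default is removed here.
    'num_beams': 1,
})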
164 changes: 0 additions & 164 deletions scripts/eval/local_data/programming/human_eval.jsonl

This file was deleted.

161 changes: 0 additions & 161 deletions scripts/eval/local_data/programming/processed_humaneval_c++.jsonl

This file was deleted.

