Skip to content

Commit

Permalink
chore: Add loguru for logging in lmms_eval package
Browse files Browse the repository at this point in the history
  • Loading branch information
Luodian committed Jun 20, 2024
1 parent 8ef2474 commit 12cea76
Show file tree
Hide file tree
Showing 110 changed files with 586 additions and 858 deletions.
29 changes: 6 additions & 23 deletions lmms_eval/__main__.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
import os
import yaml
import sys
import copy
import json
import logging

import traceback
import argparse
import torch
import numpy as np
import datetime

Expand All @@ -25,10 +23,7 @@
from lmms_eval.tasks import initialize_tasks, include_path, get_task_dict
from lmms_eval.api.registry import ALL_TASKS
from lmms_eval.logging_utils import WandbLogger
from lmms_eval.utils import PathFormatter


eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger


def _handle_non_serializable(o):
Expand Down Expand Up @@ -166,9 +161,10 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
print("└───────────────────────────────────────────────────────────────────────────────┘")
sys.exit(1)

set_loggers(args)
eval_logger = logging.getLogger("lmms-eval")
eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
# reset logger
eval_logger.remove()
eval_logger.add(sys.stdout, colorize=True, level=args.verbosity)
eval_logger.add(sys.stderr, level=args.verbosity)
eval_logger.info(f"Verbosity set to {args.verbosity}")
os.environ["TOKENIZERS_PARALLELISM"] = "false"

Expand Down Expand Up @@ -228,11 +224,6 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:


def cli_evaluate_single(args: Union[argparse.Namespace, None] = None) -> None:
eval_logger = logging.getLogger("lmms-eval")
eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
eval_logger.info(f"Verbosity set to {args.verbosity}")
os.environ["TOKENIZERS_PARALLELISM"] = "false"

initialize_tasks(args.verbosity)

if args.predict_only:
Expand Down Expand Up @@ -350,13 +341,5 @@ def print_results(args, results):
print(evaluator.make_table(results, "groups"))


def set_loggers(args):
    """Attach a stream handler with a timezone-aware ``PathFormatter`` to the
    shared "lmms-eval" logger.

    Args:
        args: parsed CLI namespace; only ``args.timezone`` is read here.
    """
    log_format = PathFormatter(
        "%(asctime)s [%(pathname)s:%(lineno)d] %(levelname)s %(message)s",
        "%m-%d %H:%M:%S",
        timezone=args.timezone,
    )
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(log_format)
    logging.getLogger("lmms-eval").addHandler(stream_handler)


# Script entry point: dispatch to the multi-run CLI evaluator.
if __name__ == "__main__":
    cli_evaluate()
5 changes: 1 addition & 4 deletions lmms_eval/api/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,7 @@
import torch

from lmms_eval.api.registry import register_metric, register_aggregation

import logging

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger


# Register Aggregations First
Expand Down
4 changes: 2 additions & 2 deletions lmms_eval/api/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@
from lmms_eval.api.instance import Instance
from tqdm import tqdm
from lmms_eval import utils
import logging

eval_logger = logging.getLogger("lmms-eval")

from loguru import logger as eval_logger

T = TypeVar("T", bound="lmms")

Expand Down
3 changes: 1 addition & 2 deletions lmms_eval/api/registry.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from lmms_eval.api.model import lmms

from typing import Callable, Dict
import logging
import evaluate as hf_evaluate

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger

MODEL_REGISTRY = {}

Expand Down
17 changes: 8 additions & 9 deletions lmms_eval/api/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import ast
import itertools
import json
import logging

import os
import random
import re
Expand Down Expand Up @@ -37,7 +37,7 @@
)
from lmms_eval.filters import build_filter_ensemble

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger

# HuggingfaceM4/NoCaps contains truncated image in test split
# Include this inside code block to avoid error
Expand Down Expand Up @@ -789,17 +789,17 @@ def unzip_video_data(zip_file):

def untar_video_data(tar_file):
    """Extract all members of *tar_file* into ``cache_dir``.

    NOTE(review): ``cache_dir`` and ``eval_logger`` come from the enclosing
    scope — per the hunk header this helper is defined inside a download
    routine near ``unzip_video_data``; confirm against the full file.
    """
    import tarfile

    with tarfile.open(tar_file, "r") as tar_ref:
        tar_ref.extractall(cache_dir)
    eval_logger.info(f"Extracted all files from {tar_file} to {cache_dir}")



def concat_tar_parts(tar_parts, output_tar):
    """Concatenate split tar part files into one combined tar archive.

    The diff rendering interleaved the pre- and post-change ``open`` lines;
    this is the reconstructed post-commit version (double-quoted modes).

    Args:
        tar_parts: iterable of part-file paths (e.g. ``foo.tar.part0`` ...);
            sorted lexicographically before concatenation so parts are
            appended in order.
        output_tar: path of the combined archive to create (overwritten
            if it already exists).

    NOTE(review): ``eval_logger`` comes from the enclosing scope.
    """
    with open(output_tar, "wb") as out_tar:
        from tqdm import tqdm

        # Parts must be appended in lexicographic order to rebuild a valid archive.
        for part in tqdm(sorted(tar_parts)):
            with open(part, "rb") as part_file:
                out_tar.write(part_file.read())
    eval_logger.info(f"Concatenated parts {tar_parts} into {output_tar}")

Expand All @@ -811,15 +811,14 @@ def concat_tar_parts(tar_parts, output_tar):
# Concatenate and extract tar files if needed
if force_unzip or (not os.path.exists(cache_dir) and len(tar_files) > 0):
tar_parts_dict = {}

# Group tar parts together
for tar_file in tar_files:
base_name = tar_file.split('.tar')[0]
base_name = tar_file.split(".tar")[0]
if base_name not in tar_parts_dict:
tar_parts_dict[base_name] = []
tar_parts_dict[base_name].append(tar_file)


# Concatenate and untar split parts
for base_name, parts in tar_parts_dict.items():
eval_logger.info(f"Extracting following tar files: {parts}")
Expand Down
6 changes: 2 additions & 4 deletions lmms_eval/evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from tqdm import tqdm

import torch
import logging

import numpy as np
from datasets import Image, Sequence

Expand All @@ -17,8 +17,6 @@
import lmms_eval.api.metrics
import lmms_eval.api.registry

import re

from lmms_eval.utils import (
positional_deprecated,
run_task_tests,
Expand All @@ -28,7 +26,7 @@
simple_parse_args_string,
)

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger


@positional_deprecated
Expand Down
7 changes: 2 additions & 5 deletions lmms_eval/logging_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Code mostly from: https://github.com/EleutherAI/lm-evaluation-harness/pull/1339, credit to: https://github.com/ayulockin
import copy
import logging

import re
import os
import json
Expand All @@ -9,13 +9,10 @@
import numpy as np
from datetime import datetime
from typing import Any, Dict, List, Literal, Tuple, Union

from packaging.version import Version

from lmms_eval import utils
import tenacity

logger = logging.getLogger(__name__)
from loguru import logger

try:
import wandb
Expand Down
9 changes: 5 additions & 4 deletions lmms_eval/models/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import os
import hf_transfer
from loguru import logger
import sys

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
logger.remove()
logger.add(sys.stdout, level="WARNING")

AVAILABLE_MODELS = {
"llava": "Llava",
Expand Down Expand Up @@ -33,5 +34,5 @@
try:
exec(f"from .{model_name} import {model_class}")
except ImportError as e:
print(f"Failed to import {model_class} from {model_name}: {e}")
# logger.warning(f"Failed to import {model_class} from {model_name}: {e}")
pass
7 changes: 3 additions & 4 deletions lmms_eval/models/batch_gpt4.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from copy import deepcopy
from io import BytesIO
import base64
import logging

import os
import time
import json
Expand All @@ -20,14 +20,13 @@
from lmms_eval.api.instance import Instance
from lmms_eval.api.model import lmms
from lmms_eval.api.registry import register_model
from lmms_eval import utils
from loguru import logger as eval_logger

# Conditional imports
try:
from decord import VideoReader, cpu
except ImportError:
eval_logger = logging.getLogger("lmms-eval")
eval_logger.info("Decord is not installed. Video input will not be supported.")
eval_logger.warning("Decord is not installed. Video input will not be supported.")

# Constants and global configurations
API_TYPE = os.getenv("API_TYPE", "openai")
Expand Down
10 changes: 5 additions & 5 deletions lmms_eval/models/claude.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,28 +5,28 @@
import json
from typing import List, Tuple, Union
from tqdm import tqdm
import requests as url_requests
import time
import logging

from lmms_eval.api.instance import Instance
from lmms_eval.api.model import lmms
from lmms_eval.api.registry import register_model
from lmms_eval import utils

from accelerate import Accelerator, DistributedType

from PIL import Image

NUM_SECONDS_TO_SLEEP = 5
eval_logger = logging.getLogger("lmms-eval")

from loguru import logger

eval_logger = logger

try:
import anthropic
from decord import VideoReader, cpu
import numpy as np
except Exception as e:
eval_logger.error(f"Error importing claude: {e}")
eval_logger.warning(f"Error importing claude: {e}")

API_URL = os.getenv("ANTHROPIC_API_URL", "https://api.anthropic.com/v1/complete")
API_KEY = os.getenv("ANTHROPIC_API_KEY", "YOUR_API_KEY")
Expand Down
3 changes: 1 addition & 2 deletions lmms_eval/models/from_log.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
import json
import os
import re
Expand All @@ -11,7 +10,7 @@
from lmms_eval.api.instance import Instance
from accelerate import Accelerator, DistributedType

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger


@register_model("from_log")
Expand Down
5 changes: 1 addition & 4 deletions lmms_eval/models/fuyu.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,8 @@
from tqdm import tqdm
from accelerate import Accelerator, DistributedType
from accelerate.state import AcceleratorState
import logging

import logging

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger


@register_model("fuyu")
Expand Down
4 changes: 2 additions & 2 deletions lmms_eval/models/gemini_api.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import io
import os
import time
import logging

import json

from PIL import Image
Expand All @@ -12,7 +12,7 @@
from lmms_eval.api.instance import Instance
from accelerate import Accelerator, DistributedType

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger

try:
import google.generativeai as genai
Expand Down
4 changes: 2 additions & 2 deletions lmms_eval/models/gpt4v.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from tqdm import tqdm
import requests as url_requests
import time
import logging


from lmms_eval.api.instance import Instance
from lmms_eval.api.model import lmms
Expand All @@ -26,7 +26,7 @@

API_TYPE = os.getenv("API_TYPE", "openai")
NUM_SECONDS_TO_SLEEP = 30
eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger

if API_TYPE == "openai":
API_URL = os.getenv("OPENAI_API_URL", "https://api.openai.com/v1/chat/completions")
Expand Down
6 changes: 3 additions & 3 deletions lmms_eval/models/idefics2.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import torch
import logging

from tqdm import tqdm
from lmms_eval import utils
from lmms_eval.api.instance import Instance
Expand All @@ -14,7 +14,7 @@

warnings.filterwarnings("ignore")

eval_logger = logging.getLogger("lmms-eval")
from loguru import logger as eval_logger

DEFAULT_IMAGE_TOKEN = "<image>"
try:
Expand Down Expand Up @@ -203,7 +203,7 @@ def _collate(x):
gen_kwargs["max_new_tokens"] = 1024
if "temperature" not in gen_kwargs:
gen_kwargs["temperature"] = 0

prompts = []
for context, visual in zip(contexts, visuals):
content = []
Expand Down
5 changes: 2 additions & 3 deletions lmms_eval/models/instructblip.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import torch
import logging

import copy
from tqdm import tqdm
from lmms_eval import utils
Expand All @@ -20,8 +20,7 @@

warnings.filterwarnings("ignore")

eval_logger = logging.getLogger("lmms-eval")
transformers.logging.set_verbosity_error()
from loguru import logger as eval_logger


@register_model("instructblip")
Expand Down
Loading

0 comments on commit 12cea76

Please sign in to comment.