Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

scripts: Use local gguf package when running from repo #2927

Merged
Merged 5 commits on Aug 31, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion convert-falcon-hf-to-gguf.py
Original file line number Diff line number Diff line change
@@ -11,11 +11,14 @@
from pathlib import Path
from typing import Any

import gguf
import numpy as np
import torch
from transformers import AutoTokenizer # type: ignore[import]

if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf


def bytes_to_unicode():
# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
5 changes: 4 additions & 1 deletion convert-gptneox-hf-to-gguf.py
Original file line number Diff line number Diff line change
@@ -11,11 +11,14 @@
from pathlib import Path
from typing import Any

import gguf
import numpy as np
import torch
from transformers import AutoTokenizer # type: ignore[import]

if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf

# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py


6 changes: 5 additions & 1 deletion convert-llama-ggmlv3-to-gguf.py
Original file line number Diff line number Diff line change
@@ -7,9 +7,13 @@
import sys
from pathlib import Path

import gguf
import numpy as np

import os
if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf

# Note: Does not support GGML_QKK_64
QK_K = 256
# Items here are (block size, type size)
6 changes: 5 additions & 1 deletion convert.py
Original file line number Diff line number Diff line change
@@ -25,10 +25,14 @@
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Literal, Sequence, TypeVar

import gguf
import numpy as np
from sentencepiece import SentencePieceProcessor # type: ignore[import]

import os
if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
import gguf

if TYPE_CHECKING:
from typing import TypeAlias

Original file line number Diff line number Diff line change
@@ -2,13 +2,16 @@
# train-text-from-scratch checkpoint --> gguf conversion

import argparse
import gguf
import os
import struct
import sys
import numpy as np
from pathlib import Path

if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / '..' / '..' / 'gguf-py' / 'gguf'))
import gguf

# gguf constants
LLM_KV_OPTIMIZER_TYPE = "optimizer.type"
LLM_KV_OPTIMIZER_TYPE_ADAM = "adam"