demos: add llm #28
Workflow file for this run
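# Builds llm_demo against a downloaded OpenVINO archive, exports open_llama_3b_v2
# to OpenVINO IR with optimum-intel, and runs a short generation on each pull request.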
name: github-actions
on: pull_request
jobs:
  llm_demo:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
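      # Fetch a prebuilt OpenVINO 2023.1 archive into ./ov and install its system
      # dependencies; OpenCV development headers and libraries come from apt.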
      - name: Install OpenVINO and OpenCV
        run: |
          mkdir ov
          curl https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.1/linux/l_openvino_toolkit_ubuntu22_2023.1.0.12185.47b736f63ed_x86_64.tgz | tar --directory ov --strip-components 1 -xz
          sudo ov/install_dependencies/install_openvino_dependencies.sh
          sudo apt install libopencv-dev
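      # Configure against the extracted OpenVINO package (OpenVINO_DIR points at its
      # CMake config directory) and build only the llm_demo target in Release mode.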
      - name: Build llm_demo
        run: |
          mkdir build
          cd build
          cmake -DCMAKE_BUILD_TYPE=Release -DOpenVINO_DIR=../ov/runtime/cmake ../demos
          cmake --build . --target llm_demo --config Release -j
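      # actions/checkout accepts a custom github-server-url, so this step clones the
      # open_llama_3b_v2 model repository from Hugging Face; lfs: true pulls the
      # weight files stored in Git LFS.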
      - uses: actions/checkout@v4
        with:
          repository: openlm-research/open_llama_3b_v2
          ref: main
          path: open_llama_3b_v2
          lfs: true
          github-server-url: https://huggingface.co
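      # Python 3.11 with pip caching for the conversion dependencies installed below.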
      - uses: actions/setup-python@v4
        with:
          python-version: 3.11
          cache: 'pip'
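      # Export the PyTorch checkpoint to OpenVINO IR (openvino_model.xml/.bin) via
      # optimum-intel, and write a vocabulary-only GGUF with llama.cpp's convert.py;
      # the demo loads vocab.gguf for tokenization.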
      - name: Download and convert open_llama_3b_v2 and the vocab
        working-directory: open_llama_3b_v2
        run: |
          python -m pip install --extra-index-url https://download.pytorch.org/whl/cpu onnx git+https://github.com/huggingface/optimum-intel.git
          source ../ov/setupvars.sh
          python -c "from optimum.intel.openvino import OVModelForCausalLM; OVModelForCausalLM.from_pretrained('.', export=True).save_pretrained('.')"
          python ../demos/thirdparty/llama.cpp/convert.py . --vocab-only --outfile vocab.gguf
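      # setupvars.sh puts the OpenVINO runtime libraries on LD_LIBRARY_PATH so the
      # demo binary can load them; the demo then generates a completion for the
      # prompt "return 0".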
      - name: Run llm_demo
        run: |
          source ov/setupvars.sh
          ./build/intel64/Release/llm_demo open_llama_3b_v2/openvino_model.xml open_llama_3b_v2/vocab.gguf "return 0"