🔨 windows installation improvements & version bump
yusufcanb committed Mar 1, 2024
1 parent aef9293 commit 2524b93
Showing 2 changed files with 148 additions and 77 deletions.
74 changes: 50 additions & 24 deletions install.ps1
@@ -16,58 +16,84 @@ if ($env:PROCESSOR_ARCHITECTURE -eq 'AMD64') {
}

# Download URL Construction
$version = "1.0-rc3"
$version = "1.0"
$base_url = "https://github.com/yusufcanb/tlm/releases/download"
$download_url = "${base_url}/${version}/tlm_${version}_${os}_${arch}.exe"

# Ollama check
$ollamaHost = $env:OLLAMA_HOST
if (-not $ollamaHost) {
$ollamaHost = "http://localhost:11434"
}

# Ollama check - For Windows, we'll assume Ollama is installed directly on the system
try {
Invoke-WebRequest -Uri "http://localhost:11434" -UseBasicParsing -ErrorAction Stop | Out-Null
Invoke-WebRequest -Uri $ollamaHost -UseBasicParsing -ErrorAction Stop | Out-Null
} catch {
Write-Host "ERR: Ollama not found." -ForegroundColor red
Write-Host "If you have Ollama installed, please make sure it's running and accessible at $ollamaHost" -ForegroundColor red
Write-Host "or configure OLLAMA_HOST environment variable." -ForegroundColor red
Write-Host ""
Write-Host ">>> If have Ollama on your system or network, you can set the OLLAMA_HOST like below;"
Write-Host " `$env:OLLAMA_HOST` = 'http://localhost:11434'"
Write-Host ""
Write-Host ""
Write-Host ">>> If you don't have Ollama installed, you can install it using the following methods;"
Write-Host ""
Write-Host "*** On Windows: ***" -ForegroundColor green
Write-Host " *** Windows: ***" -ForegroundColor green
Write-Host " Download instructions can be followed at the following link: https://ollama.com/download"
Write-Host ""
Write-Host "Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs."
Write-Host "To get started using the Docker image, please follow these steps:"
Write-Host " *** Official Docker Images: ***" -ForegroundColor green
Write-Host ""
Write-Host "1. *** CPU only: ***" -ForegroundColor green
Write-Host " docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
Write-Host " Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs."
Write-Host " To get started using the Docker image, please follow these steps:"
Write-Host ""
Write-Host "2. *** Nvidia GPU: ***" -ForegroundColor green
Write-Host " docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
Write-Host " 1. *** CPU only: ***" -ForegroundColor green
Write-Host " docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
Write-Host ""
Write-Host " 2. *** Nvidia GPU: ***" -ForegroundColor green
Write-Host " docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
Write-Host ""
Write-Host ""
Write-Host "Installation aborted." -ForegroundColor red
Write-Host "Please install Ollama using the methods above and try again." -ForegroundColor red
return -1
Write-Host "Please install or configure Ollama using the methods above and try again." -ForegroundColor red
return
}

# Create Application Directory
$install_directory = "C:\Users\$env:USERNAME\AppData\Local\Programs\tlm"
if (-not (Test-Path $install_directory)) {
New-Item -ItemType Directory -Path $install_directory | Out-Null
}

# Download the binary
Write-Host "Downloading tlm version ${version} for ${os}/${arch}..."
try {
Invoke-WebRequest -Uri $download_url -OutFile 'tlm.exe' -UseBasicParsing -ErrorAction Stop | Out-Null
Invoke-WebRequest -Uri $download_url -OutFile "$install_directory\tlm.exe" -UseBasicParsing -ErrorAction Stop | Out-Null
} catch {
Write-Error "Download failed. Please check your internet connection and try again."
return -1
}

# Move to installation directory
Write-Host "Installing tlm..."
#try {
# Move-Item -Path 'tlm.exe' -Destination 'C:\Windows\Program Files\tlm\' -Force
#} catch {
# Write-Error "Installation requires administrator permissions. Please elevate with rights and run the script again."
# exit 1
#}
# Add installation directory to PATH
$env:Path += ";$install_directory"

# Ollama deployment - specific to the original script, might need modification
# Configure tlm to use Ollama
try {
.\tlm.exe deploy
."$install_directory\tlm.exe" config set llm.host $ollamaHost
} catch {
Write-Error "tlm config set llm.host failed."
return 1
}

# Deploy tlm
try {

."$install_directory\tlm.exe" deploy
} catch {
Write-Error "tlm deploy failed."
return 1
}

Write-Host "Type '.\tlm.exe help' to get started."
Write-Host ""
Write-Host "Installation completed successfully."
Write-Host "Type 'tlm help' to get started."
151 changes: 98 additions & 53 deletions install.sh
@@ -6,79 +6,117 @@ status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; }
warning() { echo "WARNING: $*"; }

print_message() {
local message="$1"
local color="$2"
echo -e "\e[${color}m${message}\e[0m"
}

# OS and Architecture Detection
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
os="linux"
os="linux"
elif [[ "$OSTYPE" == "darwin"* ]]; then
os="darwin"
os="darwin"
else
error "Unsupported operating system. Only Linux and macOS are currently supported."
exit 1
error "Unsupported operating system. Only Linux and macOS are currently supported."
exit 1
fi

if [[ "$(uname -m)" == "x86_64" ]]; then
arch="amd64"
arch="amd64"
elif [[ "$(uname -m)" == "aarch64" || "$(uname -m)" == "arm64" ]]; then
arch="arm64"
arch="arm64"
else
error "Unsupported architecture. tlm requires a 64-bit system (x86_64 or arm64)."
exit 1
error "Unsupported architecture. tlm requires a 64-bit system (x86_64 or arm64)."
exit 1
fi

# Download URL Construction
version="1.0-rc3"
version="1.0"
base_url="https://github.com/yusufcanb/tlm/releases/download"
download_url="${base_url}/${version}/tlm_${version}_${os}_${arch}"

if [[ -v OLLAMA_HOST ]]; then
ollama_host=$OLLAMA_HOST
else
ollama_host="http://localhost:11434"
fi

# Ollama check
if ! curl -fsSL http://localhost:11434 &> /dev/null; then
error "Ollama not found."
if [[ "$os" == "darwin" ]]; then
status ""
status "*** On macOS: ***"
status ""
status "Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs."
status "To get started using the Docker image, please follow these steps:"
status ""
status "1. *** CPU only: ***"
status " docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
status ""
status "2. *** GPU Acceleration: ***"
status " This option requires running Ollama outside of Docker"
status " To get started, simply download and install Ollama."
status " https://ollama.com/download"
status ""
status ""
status "Installation aborted. Please install Ollama using the methods above and try again."
exit 1
if ! curl -fsSL $ollama_host &>/dev/null; then
if [[ "$os" == "darwin" ]]; then
print_message "ERR: Ollama not found." "31" # Red color
print_message "If you have Ollama installed, please make sure it's running and accessible at ${ollama_host}" "31"
print_message "or configure OLLAMA_HOST environment variable." "31"
echo """
>>> If you have Ollama on your system or network, you can set OLLAMA_HOST like below;
elif [[ "$os" == "linux" ]]; then
status ""
status "*** On Linux: ***"
status ""
status "Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs."
status "To get started using the Docker image, please follow these steps:"
status ""
status "1. *** CPU only: ***"
status " docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
status ""
status "2. *** Nvidia GPU: ***"
status " docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama"
status ""
status ""
status "Installation aborted. Please install Ollama using the methods above and try again."
exit 1
$ export OLLAMA_HOST=http://localhost:11434
>>> If you don't have Ollama installed, you can install it using the following methods;
$(print_message "*** macOS: ***" "32")
Download instructions can be followed at the following link: https://ollama.com/download
$(print_message "*** Official Docker Images: ***" "32")
Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs.
To get started using the Docker image, please follow these steps:
$(print_message "1. *** CPU only: ***" "32")
$ docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
$(print_message "2. *** Nvidia GPU: ***" "32")
$ docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
"""
print_message "Installation aborted..." "31"
print_message "Please install or configure Ollama using the methods above and try again." "31"
exit 1

elif [[ "$os" == "linux" ]]; then
print_message "ERR: Ollama not found." "31" # Red color
print_message "If you have Ollama installed, please make sure it's running and accessible at ${ollama_host}" "31"
print_message "or configure OLLAMA_HOST environment variable." "31"
echo """
>>> If you have Ollama on your system or network, you can set OLLAMA_HOST like below;
fi
$ export OLLAMA_HOST=http://localhost:11434
>>> If you don't have Ollama installed, you can install it using the following methods;
$(print_message "*** Linux: ***" "32")
Download instructions can be followed at the following link: https://ollama.com/download
$(print_message "*** Official Docker Images: ***" "32")
Ollama can run with GPU acceleration inside Docker containers for Nvidia GPUs.
To get started using the Docker image, please follow these steps:
$(print_message "1. *** CPU only: ***" "32")
$ docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
$(print_message "2. *** Nvidia GPU: ***" "32")
$ docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
"""
print_message "Installation aborted..." "31"
print_message "Please install or configure Ollama using the methods above and try again." "31"
exit 1
fi
fi

# Download the binary
status "Downloading tlm version ${version} for ${os}/${arch}..."
if ! curl -fsSL -o tlm ${download_url}; then
error "Download failed. Please check your internet connection and try again."
exit 1
error "Download failed. Please check your internet connection and try again."
exit 1
fi

# Make executable
Expand All @@ -97,14 +135,21 @@ if [ "$(id -u)" -ne 0 ]; then
SUDO="sudo"
fi

$SUDO mv tlm /usr/local/bin/;
$SUDO mv tlm /usr/local/bin/

# set ollama host
if ! tlm config set llm.host ${ollama_host} &>/dev/null; then
error "tlm config set llm.host ${ollama_host} failed."
exit 1
fi

# deploy tlm modelfiles
if ! tlm deploy; then
error ""
exit 1
error "tlm deploy ${ollama_host} failed."
exit 1
else
echo ""
echo ""
fi

status "Type 'tlm' to get started."
exit 0
exit 0
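
Correspondingly, install.sh now reads OLLAMA_HOST when it is set, falls back to http://localhost:11434 otherwise, and runs tlm config set llm.host before tlm deploy. A minimal usage sketch under the same assumptions (the raw.githubusercontent.com path and the remote host are illustrative, not part of this commit):

    # Hypothetical example: point the installer at an Ollama instance on another host, then run it.
    export OLLAMA_HOST=http://192.168.1.50:11434
    curl -fsSL -o install.sh https://raw.githubusercontent.com/yusufcanb/tlm/main/install.sh
    bash install.sh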
