Commit
Revert "nonfunctional 404 quickstart command w/ some other typo corre…
Browse files Browse the repository at this point in the history
…ctions"

This reverts commit 39fb511.
cpacker committed Dec 18, 2023
1 parent 8499a82 commit 53df32c
Showing 4 changed files with 3 additions and 33 deletions.
29 changes: 0 additions & 29 deletions memgpt/cli/cli.py
@@ -1,6 +1,5 @@
 import typer
 import json
-import requests
 import sys
 import io
 import logging
@@ -26,34 +25,6 @@
 from memgpt.server.constants import WS_DEFAULT_PORT, REST_DEFAULT_PORT
 
 
-class QuickstartChoice(Enum):
-    # openai = "openai"
-    # azure = "azure"
-    memgpt_hosted = "memgpt"
-
-
-def quickstart(
-    type: QuickstartChoice = typer.Option("memgpt", help="Quickstart setup type"),
-):
-    """Set the base config file with a single command"""
-    if type == QuickstartChoice.memgpt_hosted:
-        # Download the latest memgpt hosted config
-        url = "https://raw.githubusercontent.com/cpacker/MemGPT/main/configs/memgpt_hosted.json"
-        response = requests.get(url)
-
-        # Check if the request was successful
-        if response.status_code == 200:
-            # Parse the response content as JSON
-            config = response.json()
-            # Output a success message and the first few items in the dictionary as a sample
-            print("JSON file downloaded and loaded into a dictionary successfully.")
-        else:
-            print(f"Failed to download config from {url}. Status code:", response.status_code)
-
-    else:
-        raise NotImplementedError(type)
-
-
 def open_folder():
     """Open a folder viewer of the MemGPT home directory"""
     try:
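Note: per the commit title, the quickstart command removed above was "nonfunctional" because the GET returned a 404 (the raw config URL was not reachable at the time). A minimal sketch of the same fetch with the failure surfaced as an exception — purely illustrative, not part of this commit; the helper name and timeout are assumptions:

import requests

def fetch_hosted_config(url: str) -> dict:
    """Download a JSON config, raising on HTTP errors such as 404."""
    response = requests.get(url, timeout=10)  # timeout is an illustrative choice
    response.raise_for_status()  # a 404 raises here instead of failing silently
    return response.json()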
2 changes: 1 addition & 1 deletion memgpt/local_llm/chat_completion_proxy.py
@@ -54,7 +54,7 @@ def get_chat_completion(
     # Warn the user that we're using the fallback
     if not has_shown_warning:
         print(
-            f"{CLI_WARNING_PREFIX}no wrapper specified for local LLM, using the default wrapper (you can remove this warning by specifying the wrapper with --model-wrapper)"
+            f"{CLI_WARNING_PREFIX}no wrapper specified for local LLM, using the default wrapper (you can remove this warning by specifying the wrapper with --wrapper)"
         )
         has_shown_warning = True
     if endpoint_type in ["koboldcpp", "llamacpp", "webui"]:
2 changes: 1 addition & 1 deletion memgpt/local_llm/vllm/api.py
@@ -18,7 +18,7 @@ def get_vllm_completion(endpoint, model, prompt, context_window, user, settings=
     # Settings for the generation, includes the prompt + stop tokens, max length, etc
     request = settings
     request["prompt"] = prompt
-    request["max_tokens"] = 3000  # int(context_window - prompt_tokens)
+    request["max_tokens"] = int(context_window - prompt_tokens)
     request["stream"] = False
     request["user"] = user
 
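Note: this hunk restores the dynamic completion budget — max_tokens becomes the model's context window minus the tokens already consumed by the prompt, rather than a hardcoded 3000. A worked illustration with made-up numbers:

context_window = 8192  # total context size (illustrative value)
prompt_tokens = 1500   # tokens consumed by the prompt (illustrative value)
max_tokens = int(context_window - prompt_tokens)  # 6692 tokens left to generate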
3 changes: 1 addition & 2 deletions memgpt/main.py
@@ -21,7 +21,7 @@
 import memgpt.agent as agent
 import memgpt.system as system
 import memgpt.constants as constants
-from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart
+from memgpt.cli.cli import run, attach, version, server, open_folder
 from memgpt.cli.cli_config import configure, list, add
 from memgpt.cli.cli_load import app as load_app
 from memgpt.connectors.storage import StorageConnector
@@ -35,7 +35,6 @@
 app.command(name="add")(add)
 app.command(name="server")(server)
 app.command(name="folder")(open_folder)
-app.command(name="quickstart")(quickstart)
 # load data commands
 app.add_typer(load_app, name="load")
 
