Skip to content

Commit

Permalink
[4/n] Add 'start' command to aiconfig server and CLI (#1060)
Browse files Browse the repository at this point in the history
[4/n] Add 'start' command to aiconfig server and CLI


The current aiconfig CLI has an 'edit' command which starts both the
frontend and backend servers, and also loads the aiconfig from disk.

In the same vein, this diff introduces a 'start' command that is used to
just start the aiconfig server. This is used by the vscode extension to
spawn a server when an aiconfig file is opened.

Unlike the 'edit' command, 'start':
* Doesn't load the aiconfig on server init. Instead, the
/api/load_content endpoint is called to initialize.
* Doesn't search for open ports. The vscode client is responsible for
finding an open port. This is because the client needs to be in control
of the server lifecycle.

A small change that was also added to this diff: using `dotenv` to load
an env file.

Test Plan:
* At the top of stack

---
Stack created with [Sapling](https://sapling-scm.com). Best reviewed
with
[ReviewStack](https://reviewstack.dev/lastmile-ai/aiconfig/pull/1060).
* #1076
* __->__ #1060
* #1059
* #1058
* #1057
  • Loading branch information
saqadri authored Jan 30, 2024
2 parents 1d47fd0 + 088a398 commit 56433f0
Show file tree
Hide file tree
Showing 5 changed files with 311 additions and 79 deletions.
121 changes: 80 additions & 41 deletions python/src/aiconfig/Config.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,17 @@
ModelParserRegistry.register_model_parser(
DefaultAnyscaleEndpointParser("AnyscaleEndpoint")
)
ModelParserRegistry.register_model_parser(GeminiModelParser("gemini-pro"), ["gemini-pro"])
ModelParserRegistry.register_model_parser(
GeminiModelParser("gemini-pro"), ["gemini-pro"]
)
ModelParserRegistry.register_model_parser(ClaudeBedrockModelParser())
ModelParserRegistry.register_model_parser(HuggingFaceTextGenerationParser())
for model in gpt_models_extra:
ModelParserRegistry.register_model_parser(DefaultOpenAIParser(model))
ModelParserRegistry.register_model_parser(PaLMChatParser())
ModelParserRegistry.register_model_parser(PaLMTextParser())


class AIConfigRuntime(AIConfig):
# A mapping of model names to their respective parsers

Expand Down Expand Up @@ -118,14 +121,37 @@ def load(cls, config_filepath: str) -> "AIConfigRuntime":
else:
data = file.read()

# load the file as bytes and let pydantic handle the parsing
# validated_data = AIConfig.model_validate_json(file.read())
aiconfigruntime = cls.model_validate_json(data)
update_model_parser_registry_with_config_runtime(aiconfigruntime)

config_runtime = cls.load_json(data)
# set the file path. This is used when saving the config
aiconfigruntime.file_path = config_filepath
return aiconfigruntime
config_runtime.file_path = config_filepath
return config_runtime

@classmethod
def load_json(cls, config_json: str) -> "AIConfigRuntime":
    """
    Build an AIConfigRuntime from a JSON string.

    Args:
        config_json (str): The JSON representing the AIConfig.

    Returns:
        AIConfigRuntime: The runtime parsed from the JSON payload.
    """
    runtime = cls.model_validate_json(config_json)
    # Make sure the model parsers referenced by this config are registered.
    update_model_parser_registry_with_config_runtime(runtime)
    return runtime

@classmethod
def load_yaml(cls, config_yaml: str) -> "AIConfigRuntime":
    """
    Build an AIConfigRuntime from a YAML string.

    Args:
        config_yaml (str): The YAML representing the AIConfig.

    Returns:
        AIConfigRuntime: The runtime parsed from the YAML payload.
    """
    # Normalize YAML to JSON and reuse the JSON loading path so both
    # formats share one validation/registration code path.
    parsed = yaml.safe_load(config_yaml)
    return cls.load_json(json.dumps(parsed))

@classmethod
def load_from_workbook(cls, workbook_id: str) -> "AIConfigRuntime":
Expand Down Expand Up @@ -431,17 +457,6 @@ def save(
config_filepath (str, optional): The file path to the JSON or YAML configuration file.
Defaults to "aiconfig.json" or "aiconfig.yaml", depending on the mode.
"""
# Decide if we want to serialize as YAML or JSON

# AIConfig json should only contain the core data fields. These are auxiliary fields that should not be persisted
exclude_options = {
"prompt_index": True,
"file_path": True,
"callback_manager": True,
}

if not include_outputs:
exclude_options["prompts"] = {"__all__": {"outputs"}}

default_filepath = (
"aiconfig.yaml" if mode == "yaml" else "aiconfig.json"
Expand All @@ -457,30 +472,54 @@ def save(
# Default to JSON
mode = "json"

config_string = self.to_string(include_outputs, mode)

with open(config_filepath, "w") as file:
# Serialize the AIConfig to JSON
json_data = self.model_dump(
mode="json",
exclude=exclude_options,
exclude_none=True,
file.write(config_string)

def to_string(
self,
include_outputs: bool = True,
mode: Literal["json", "yaml"] = "json",
) -> str:
"""
Returns the well-formatted string representing the AIConfig object.
Note that this method will return the string that would be saved as a .aiconfig file using the save() method.
To get the raw string representation of the AIConfig object, use the __str__() method.
"""
# AIConfig json should only contain the core data fields. These are auxiliary fields that should not be persisted
exclude_options = {
"prompt_index": True,
"file_path": True,
"callback_manager": True,
}

if not include_outputs:
exclude_options["prompts"] = {"__all__": {"outputs"}}

# Serialize the AIConfig to JSON
json_data = self.model_dump(
mode="json",
exclude=exclude_options,
exclude_none=True,
)

if json_data.get("$schema", None) is None:
# Set the schema if it is not set
json_data["$schema"] = "https://json.schemastore.org/aiconfig-1.0"

if mode == "yaml":
# Save AIConfig JSON as YAML string
return yaml.dump(
json_data,
indent=2,
)
else:
# Save AIConfig as JSON string, with the schema specified
return json.dumps(
json_data,
indent=2,
)
if mode == "yaml":
# Save AIConfig JSON as YAML to the file
yaml.dump(
json_data,
file,
indent=2,
)
else:
# Save AIConfig as JSON to the file, with the schema specified
json_data[
"$schema"
] = "https://json.schemastore.org/aiconfig-1.0"
json.dump(
json_data,
file,
indent=2,
)

def get_output_text(
self, prompt: str | Prompt, output: Optional[dict] = None
Expand Down
115 changes: 96 additions & 19 deletions python/src/aiconfig/editor/server/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import time
import uuid
import webbrowser
from typing import Any, Dict, Type, Union
from typing import Any, Dict, Literal, Type, Union

import lastmile_utils.lib.core.api as core_utils
import result
Expand All @@ -25,6 +25,7 @@
OpArgs,
ServerMode,
ServerState,
StartServerConfig,
ValidatedPath,
get_http_response_load_user_parser_module,
get_server_state,
Expand All @@ -42,13 +43,14 @@
from flask_cors import CORS
from result import Err, Ok, Result

from aiconfig.schema import ExecuteResult, Output, Prompt
from aiconfig.schema import ExecuteResult, Output, Prompt, PromptMetadata

logging.getLogger("werkzeug").disabled = True

logging.basicConfig(format=core_utils.LOGGER_FMT)
LOGGER = logging.getLogger(__name__)

# TODO: saqadri - use logs directory to save logs
log_handler = logging.FileHandler("editor_flask_server.log", mode="a")
formatter = logging.Formatter(core_utils.LOGGER_FMT)
log_handler.setFormatter(formatter)
Expand All @@ -61,37 +63,49 @@


def run_backend_server(
edit_config: EditServerConfig, aiconfigrc_path: str
initialization_settings: StartServerConfig | EditServerConfig,
aiconfigrc_path: str,
) -> Result[str, str]:
LOGGER.setLevel(edit_config.log_level)
LOGGER.info("Edit config: %s", edit_config.model_dump_json())
LOGGER.setLevel(initialization_settings.log_level)
LOGGER.info("Edit config: %s", initialization_settings.model_dump_json())
LOGGER.info(
f"Starting server on http://localhost:{edit_config.server_port}"
f"Starting server on http://localhost:{initialization_settings.server_port}"
)
try:
LOGGER.info(
f"Opening browser at http://localhost:{edit_config.server_port}"
)
webbrowser.open(f"http://localhost:{edit_config.server_port}")
except Exception as e:
LOGGER.warning(
f"Failed to open browser: {e}. Please open http://localhost:{port} manually."
)

if isinstance(initialization_settings, EditServerConfig):
try:
LOGGER.info(
f"Opening browser at http://localhost:{initialization_settings.server_port}"
)
webbrowser.open(
f"http://localhost:{initialization_settings.server_port}"
)
except Exception as e:
LOGGER.warning(
f"Failed to open browser: {e}. Please open http://localhost:{initialization_settings.server_port} manually."
)
else:
# In the case of the 'start' command, just the webserver is started up, and there's no need to open the browser
pass

app.server_state = ServerState() # type: ignore
res_server_state_init = init_server_state(
app, edit_config, aiconfigrc_path
app, initialization_settings, aiconfigrc_path
)
match res_server_state_init:
case Ok(_):
LOGGER.info("Initialized server state")
debug = edit_config.server_mode in [
debug = initialization_settings.server_mode in [
ServerMode.DEBUG_BACKEND,
ServerMode.DEBUG_SERVERS,
]
LOGGER.info(f"Running in {edit_config.server_mode} mode")
LOGGER.info(
f"Running in {initialization_settings.server_mode} mode"
)
app.run(
port=edit_config.server_port, debug=debug, use_reloader=debug
port=initialization_settings.server_port,
debug=debug,
use_reloader=debug,
)
return Ok("Done")
case Err(e):
Expand Down Expand Up @@ -188,6 +202,45 @@ def load() -> FlaskResponse:
).to_flask_format()


@app.route("/api/load_content", methods=["POST"])
def load_content() -> FlaskResponse:
    """Initialize the server's AIConfig from a serialized payload.

    Expects a JSON body with optional keys:
      * content: the serialized aiconfig string. When absent, a fresh
        empty AIConfig is created instead.
      * mode: "json" (default) or "yaml" — the serialization format of
        `content`.
    """
    state = get_server_state(app)
    body = request.get_json()
    content = body.get("content", None)
    serialization_mode = body.get("mode", "json")

    if content is not None:
        # A serialized aiconfig was supplied; parse it in the requested format.
        loader = (
            AIConfigRuntime.load_json
            if serialization_mode == "json"
            else AIConfigRuntime.load_yaml
        )
        aiconfig = loader(content)
        message = "Loaded"
    else:
        # No content given — start from an empty AIConfig, seeded with one
        # placeholder prompt when any model parser is registered.
        aiconfig = AIConfigRuntime.create()  # type: ignore
        model_ids = ModelParserRegistry.parser_ids()
        if model_ids:
            aiconfig.add_prompt(
                "prompt_1",
                Prompt(
                    name="prompt_1",
                    input="",
                    metadata=PromptMetadata(model=model_ids[0]),
                ),
            )
        message = "Created"

    state.aiconfig = aiconfig
    return HttpResponseWithAIConfig(
        message=message, aiconfig=aiconfig
    ).to_flask_format()


@app.route("/api/save", methods=["POST"])
def save() -> FlaskResponse:
state = get_server_state(app)
Expand Down Expand Up @@ -225,6 +278,30 @@ def save() -> FlaskResponse:
).to_flask_format()


@app.route("/api/to_string", methods=["POST"])
def to_string() -> FlaskResponse:
    """Serialize the currently loaded AIConfig to a string.

    Request body keys:
      * mode: "json" (default) or "yaml".
      * include_outputs: whether prompt outputs are serialized (default True).

    Returns a 400 response when the mode is unrecognized or no AIConfig
    is loaded; otherwise responds with {"aiconfig_string": ...}.
    """
    state = get_server_state(app)
    body = request.get_json()
    mode: Literal["json", "yaml"] = body.get("mode", "json")
    include_outputs: bool = body.get("include_outputs", True)

    # Guard clauses: reject a bad mode or a missing config before serializing.
    if mode not in ("json", "yaml"):
        return HttpResponseWithAIConfig(
            message=f"Invalid mode: {mode}", code=400, aiconfig=None
        ).to_flask_format()
    if state.aiconfig is None:
        return HttpResponseWithAIConfig(
            message="No AIConfig loaded", code=400, aiconfig=None
        ).to_flask_format()

    serialized = state.aiconfig.to_string(
        include_outputs=include_outputs, mode=mode
    )
    return FlaskResponse(({"aiconfig_string": serialized}, 200))


@app.route("/api/create", methods=["POST"])
def create() -> FlaskResponse:
state = get_server_state(app)
Expand Down
Loading

0 comments on commit 56433f0

Please sign in to comment.