Initial lint run #54

Closed
wants to merge 2 commits
26 changes: 26 additions & 0 deletions .github/workflows/ci.yaml
@@ -0,0 +1,26 @@
name: Continuous Integration

on:
  push:


env:
  PYTHON_VERSION: 3.11.4
jobs:
  lint:
    runs-on: ubuntu-latest
    name: Lint
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: pipx install poetry
      - uses: actions/setup-python@v4
        with:
          python-version: ${{env.PYTHON_VERSION}}
          cache: 'poetry'
          architecture: 'x64'
      - name: Install
        run: poetry install
      - name: Lint
        run: make lint-ci-cd

30 changes: 28 additions & 2 deletions Makefile
@@ -1,4 +1,10 @@
.PHONY: examples agents evals logging
POETRY := poetry
FOLDERS=log10 examples
BLUE=\033[0;34m
NC=\033[0m # No Color

.PHONY: examples agents evals logging lint lint-ci-cd autolint lint-ruff lint-pyright check-lock


agents:
	LOG10_EXAMPLES_MODEL=noop python examples/agents/biochemist.py
@@ -47,4 +53,24 @@ evals:
	(cd examples/evals && python compile.py)
	(cd examples/evals && python fuzzy.py)

examples: agents evals logging
examples: agents evals logging

autolint:
	${POETRY} run black .
	${POETRY} run ruff --fix .
	${POETRY} run isort ${FOLDERS}

lint-ci-cd: check-lock lint-ruff lint-pyright

lint: autolint check-lock lint-ruff lint-pyright

lint-pyright:
	@echo "\n${BLUE}Running pyright...${NC}\n"
	${POETRY} run pyright ${FOLDERS}

lint-ruff: ## Run the ruff linter
	@echo "\n${BLUE}Running ruff...${NC}\n"
	${POETRY} run ruff .

check-lock:
	${POETRY} lock --check
Empty file added examples/agents/__init__.py
Empty file.
18 changes: 12 additions & 6 deletions examples/agents/biochemist.py
@@ -1,19 +1,21 @@
import os
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.agents.camel import camel_agent
from typing import Any, Optional

from dotenv import load_dotenv

from log10.openai import OpenAI
from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.openai import OpenAI

load_dotenv()

# Select one of OpenAI or Anthropic models
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")
max_turns = 30

llm = None
llm: Optional[Any] = None
summary_model = None
if "claude" in model:
    import anthropic
@@ -31,11 +33,15 @@
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

task_prompt = """Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2.
Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2
in an explicit solvent environment using the CHARMM force field and analyze the conformational
changes and hydrogen bonding patterns over time"""
# example calls from playground (select 1)
camel_agent(
user_role="Poor PhD Student",
assistant_role="Experienced Computational Chemist",
task_prompt="Perform a molecular dynamics solution of a molecule: CN1CCC[C@H]1c2cccnc2. Design and conduct a 100 ns molecular dynamics simulation of the molecule CN1CCC[C@H]1c2cccnc2 in an explicit solvent environment using the CHARMM force field and analyze the conformational changes and hydrogen bonding patterns over time",
task_prompt=task_prompt,
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
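
For orientation, every example script touched in this PR shares the same provider-selection preamble; a minimal condensed sketch follows. The claude and noop branch bodies are collapsed in this diff view, so their exact contents below are assumptions, not copied from the PR.

import os
from typing import Any, Optional

from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.openai import OpenAI

model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")

llm: Optional[Any] = None
summary_model: Optional[str] = None
if "claude" in model:
    # Assumed: the collapsed branch builds an Anthropic client for the chosen model.
    summary_model = "claude-1-100k"
    llm = Anthropic({"model": model})
elif model == "noop":
    # Assumed: the Makefile's LOG10_EXAMPLES_MODEL=noop path uses the no-op LLM.
    summary_model = model
    llm = NoopLLM()
else:
    summary_model = "gpt-3.5-turbo-16k"
    llm = OpenAI({"model": model})

Because llm starts as None and is later rebound to one of several client types, each script gains the llm: Optional[Any] = None annotation, which is presumably what the new lint configuration expects.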
41 changes: 33 additions & 8 deletions examples/agents/code_optimizer.py
@@ -1,17 +1,23 @@
import logging
import os
from typing import Any, Optional

from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.evals import compile
from log10.llm import NoopLLM
from log10.load import log10
from log10.evals import compile
from log10.agents.camel import camel_agent
from log10.openai import OpenAI
from log10.tools import code_extractor

_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Select one of OpenAI or Anthropic models
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")
max_turns = 10

llm = None
llm: Optional[Any] = None
summary_model = None
extraction_model = None
if "claude" in model:
@@ -33,12 +39,32 @@
extraction_model = "gpt-4"
llm = OpenAI({"model": model})

task_prompt = """Correct the following code.
```
#include <stdio.h>
#include <string.h>
int main() {
char password[8];
int granted = 0;
printf("Enter password: ");
scanf("%s", password);
if (strcmp(password, "password") == 0){
granted = 1;
}
if (granted) {
printf("Access granted.\\n");
} else {
printf("Access denied.\\n");
}
return 0;
}```
"""

# example calls from playground (select 1)
user_messages, assistant_messages = camel_agent(
user_role="C developer",
assistant_role="Cybersecurity expert",
task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
task_prompt="Correct the following code.\n\n",
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
@@ -48,12 +74,11 @@

# Next extract just the C code
code = code_extractor(full_response, "C", extraction_model, llm=llm)
print(f"Extracted code\n###\n{code}")
_logger.info(f"Extracted code\n###\n{code}")

# Evaluate if the code compiles
result = compile(code)
if result is True:
print("Compilation successful")
_logger.info("Compilation successful")
else:
print("Compilation failed with error:")
print(result[1])
_logger.error(f"Compilation failed with error: {result[1]}")
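
The print-to-logger switch above follows the standard library pattern; a minimal self-contained illustration is sketched below (the code and result values are stand-ins, not taken from the PR):

import logging

_logger = logging.getLogger(__name__)    # module-level logger named after the module
logging.basicConfig(level=logging.INFO)  # emit INFO and above

code = "int main(void) { return 0; }"    # stand-in for code_extractor output
result = True                            # compile() appears to return True on success, or a sequence carrying the error

_logger.info(f"Extracted code\n###\n{code}")
if result is True:
    _logger.info("Compilation successful")
else:
    _logger.error(f"Compilation failed with error: {result[1]}")

One benefit over print: the failure path now carries ERROR severity, so it can be filtered or routed separately from normal output.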
10 changes: 6 additions & 4 deletions examples/agents/coder.py
@@ -1,10 +1,12 @@
import os
from typing import Any, Optional

from dotenv import load_dotenv

from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()
@@ -13,7 +15,7 @@
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")
max_turns = 30

llm = None
llm: Optional[Any] = None
summary_model = None
if "claude" in model:
    import anthropic
32 changes: 27 additions & 5 deletions examples/agents/cybersecurity_expert.py
@@ -1,10 +1,12 @@
import os
from typing import Any, Optional

from dotenv import load_dotenv

from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()
@@ -13,7 +15,7 @@
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")
max_turns = 30

llm = None
llm: Optional[Any] = None
summary_model = None
if "claude" in model:
    import anthropic
@@ -31,11 +33,31 @@
summary_model = "gpt-3.5-turbo-16k"
llm = OpenAI({"model": model})

task_prompt = """Correct the following code.
```
#include <stdio.h>
#include <string.h>
int main() {
char password[8];
int granted = 0;
printf("Enter password: ");
scanf("%s", password);
if (strcmp(password, "password") == 0) {
granted = 1;
}
if (granted) {
printf("Access granted.\\n");
} else {
printf("Access denied.\\n");
}
return 0;
}```"""
# example calls from playground (select 1)
camel_agent(
user_role="C developer",
assistant_role="Cybersecurity expert",
task_prompt='Correct the following code.\n\n#include <stdio.h>\n#include <string.h>\n\nint main() {\n char password[8];\n int granted = 0;\n\n printf("Enter password: ");\n scanf("%s", password);\n\n if (strcmp(password, "password") == 0) {\n granted = 1;\n }\n\n if (granted) {\n printf("Access granted.\\n");\n } else {\n printf("Access denied.\\n");\n }\n\n return 0;\n}',
task_prompt=task_prompt,
summary_model=summary_model,
max_turns=max_turns,
llm=llm,
10 changes: 6 additions & 4 deletions examples/agents/email_generator.py
@@ -1,10 +1,12 @@
import os
from typing import Any, Optional

from dotenv import load_dotenv

from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()
@@ -13,7 +15,7 @@
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")
max_turns = 30

llm = None
llm: Optional[Any] = None
summary_model = None
if "claude" in model:
    import anthropic
5 changes: 3 additions & 2 deletions examples/agents/scrape_summarizer.py
@@ -1,15 +1,16 @@
import os
from typing import Any, Optional

from log10.agents.scrape_summarizer import scrape_summarizer
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.openai import OpenAI


# Select one of OpenAI or Anthropic models
model = os.environ.get("LOG10_EXAMPLES_MODEL", "gpt-3.5-turbo-16k")

llm = None
llm: Optional[Any] = None
if "claude" in model:
    import anthropic

10 changes: 6 additions & 4 deletions examples/agents/translator.py
@@ -1,10 +1,12 @@
import os
from typing import Any, Optional

from dotenv import load_dotenv

from log10.agents.camel import camel_agent
from log10.anthropic import Anthropic
from log10.llm import NoopLLM
from log10.load import log10
from log10.agents.camel import camel_agent
from dotenv import load_dotenv

from log10.openai import OpenAI

load_dotenv()
@@ -14,7 +16,7 @@

max_turns = 30

llm = None
llm: Optional[Any] = None
summary_model = None
if "claude" in model:
    import anthropic
5 changes: 3 additions & 2 deletions examples/evals/basic_eval.py
@@ -1,4 +1,5 @@
import os
from typing import Any, Optional

from log10.anthropic import Anthropic
from log10.evals import eval
from log10.openai import OpenAI
@@ -7,7 +8,7 @@
provider = "openai" # "anthropic"

# TODO: Replace with LLM abstraction.
llm = None
llm: Optional[Any] = None
if provider == "openai":
    llm = OpenAI(
        {
9 changes: 5 additions & 4 deletions examples/evals/compile.py
@@ -1,7 +1,8 @@
from typing import Any, Optional

from log10.anthropic import Anthropic
from log10.llm import Message, NoopLLM
from log10.load import log10
from log10.evals import compile
from log10.llm import Message, NoopLLM, Role
from log10.openai import OpenAI
from log10.tools import code_extractor

@@ -10,7 +11,7 @@
# model = "claude-1"


llm = None
llm: Optional[Any] = None
if "claude" in model:
    llm = Anthropic({"model": model})
    extraction_model = "claude-1-100k"
@@ -24,7 +25,7 @@

# First, write a hello world program
messages = [
Message(role="system", content="You are an expert C programmer."),
Message(role=Role.system, content="You are an expert C programmer."),
Message(
role="user",
content="Write a hello world program. Insert a null character after the hello world",
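
The hunk above switches the system message to the Role enum while the user message, shown here as a context line, still passes the string "user". A fully enum-typed version would look roughly like this; it assumes Role also defines a user member, which is not visible in this diff:

from log10.llm import Message, Role

# Sketch only: assumes Role exposes a `user` member alongside `system`.
messages = [
    Message(role=Role.system, content="You are an expert C programmer."),
    Message(
        role=Role.user,
        content="Write a hello world program. Insert a null character after the hello world",
    ),
]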
7 changes: 4 additions & 3 deletions examples/evals/fuzzy.py
@@ -1,11 +1,12 @@
import os
from log10.openai import OpenAI
from typing import Any, Optional

from log10.anthropic import Anthropic
from log10.evals import eval
from log10.openai import OpenAI

# Choose provider
provider = "anthropic" # "anthropic"
llm = None
llm: Optional[Any] = None
if provider == "openai":
    llm = OpenAI(
        {