Configure ollama into an attack environment #77
The original program code causes the error shown in the attached screenshot. The modified program is as follows.

`minimal/Agent.py`:

```python
import pathlib
from dataclasses import dataclass, field

from mako.template import Template
from rich.panel import Panel

from hackingBuddyGPT.capabilities import SSHRunCommand, SSHTestCredential
from hackingBuddyGPT.utils import SSHConnection, llm_util
from hackingBuddyGPT.usecases.base import use_case
from hackingBuddyGPT.usecases.agents import Agent
from hackingBuddyGPT.utils.cli_history import SlidingCliHistory

template_dir = pathlib.Path(__file__).parent
template_next_cmd = Template(filename=str(template_dir / "next_cmd.txt"))


@use_case("minimal_linux_privesc", "Showcase Minimal Linux Priv-Escalation")
@dataclass
class MinimalLinuxPrivesc(Agent):
    conn: SSHConnection = None
    _sliding_history: SlidingCliHistory = None

    def init(self):
        super().init()
        self._sliding_history = SlidingCliHistory(self.llm)
        self.add_capability(SSHRunCommand(conn=self.conn), default=True)
        self.add_capability(SSHTestCredential(conn=self.conn))
        self._template_size = self.llm.count_tokens(template_next_cmd.source)

    def perform_round(self, turn):
        got_root: bool = False

        with self.console.status("[bold green]Asking LLM for a new command..."):
            # get as much history as fits into the target context size
            history = self._sliding_history.get_history(self.llm.context_size - llm_util.SAFETY_MARGIN - self._template_size)

            # get the next command from the LLM
            answer = self.llm.get_response(template_next_cmd, capabilities=self.get_capability_block(), history=history, conn=self.conn)
            cmd = llm_util.cmd_output_fixer(answer.result)

        with self.console.status("[bold green]Executing that command..."):
            self.console.print(Panel(answer.result, title="[bold cyan]Got command from LLM:"))

            # assuming cmd is of the form "username password"
            parts = cmd.split(" ", 1)
            if len(parts) == 2:
                username, password = parts
                # here is the fix!
                result, got_root = self.get_capability("test_credential")(username, password)
            else:
                # handle other cases or log an error
                result = "Command format error. Expected 'username password'."
                got_root = False

        # log and output the command and its result
        # self.log_db.add_log_query(self._run_id, cmd, result, answer)  # old signature
        self.log_db.add_log_query(self._run_id, turn, cmd, result, answer)
        self._sliding_history.add_command(cmd, result)
        self.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

        return got_root
```
For reference, the original `perform_round` that triggered the error:

```python
def perform_round(self, turn):
    got_root: bool = False

    with self.console.status("[bold green]Asking LLM for a new command..."):
        # get as much history as fits into the target context size
        history = self._sliding_history.get_history(self.llm.context_size - llm_util.SAFETY_MARGIN - self._template_size)

        # get the next command from the LLM
        answer = self.llm.get_response(template_next_cmd, capabilities=self.get_capability_block(), history=history, conn=self.conn)
        cmd = llm_util.cmd_output_fixer(answer.result)

    with self.console.status("[bold green]Executing that command..."):
        self.console.print(Panel(answer.result, title="[bold cyan]Got command from LLM:"))
        result, got_root = self.get_capability(cmd.split(" ", 1)[0])(cmd)  # <- the failing line

    # log and output the command and its result
    self.log_db.add_log_query(self._run_id, turn, cmd, result, answer)
    self._sliding_history.add_command(cmd, result)
    self.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

    # if we got root, we can stop the loop
    return got_root
```
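My reading of why the original line fails with ollama (hedged): `get_capability()` looks up a capability by the first word of the model's output, but local llama models often answer in free-form prose instead of a clean `test_credential user pass` line, so the lookup falls through to the default capability and the whole sentence gets executed. A hypothetical illustration, not project code:

```python
# capability names match the ones registered in init() above
cmd = "I can't help you escalate privileges in Linux."  # typical ollama reply
name = cmd.split(" ", 1)[0]                             # -> "I"
# get_capability("I") finds no match and falls back to the default capability
# (SSHRunCommand), which then tries to run the whole sentence as a shell command
```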
`usecases/agents.py`:

```python
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Dict

from mako.template import Template
from rich.panel import Panel

from hackingBuddyGPT.utils import llm_util
from hackingBuddyGPT.capabilities.capability import Capability, capabilities_to_simple_text_handler
from .common_patterns import RoundBasedUseCase


@dataclass
class Agent(RoundBasedUseCase, ABC):
    _capabilities: Dict[str, Capability] = field(default_factory=dict)
    _default_capability: Capability = None

    def init(self):
        super().init()

    def add_capability(self, cap: Capability, default: bool = False):
        self._capabilities[cap.get_name()] = cap
        if default:
            self._default_capability = cap

    def get_capability(self, name: str) -> Capability:
        return self._capabilities.get(name, self._default_capability)

    def get_capability_block(self) -> str:
        capability_descriptions, _parser = capabilities_to_simple_text_handler(self._capabilities)
        return "You can either\n\n" + "\n".join(f"- {description}" for description in capability_descriptions.values())


@dataclass
class AgentWorldview(ABC):
    @abstractmethod
    def to_template(self):
        pass

    @abstractmethod
    def update(self, capability, cmd, result):
        pass


class TemplatedAgent(Agent):
    _state: AgentWorldview = None
    _template: Template = None
    _template_size: int = 0

    def init(self):
        super().init()

    def set_initial_state(self, initial_state: AgentWorldview):
        self._state = initial_state

    def set_template(self, template: str):
        self._template = Template(filename=template)
        self._template_size = self.llm.count_tokens(self._template.source)

    def perform_round(self, turn: int) -> bool:  # fixed code
        got_root: bool = False

        with self.console.status("[bold green]Asking LLM for a new command..."):
            # TODO output/log state
            options = self._state.to_template()
            options.update({
                'capabilities': self.get_capability_block()
            })

            # get the next command from the LLM
            answer = self.llm.get_response(self._template, **options)
            cmd = llm_util.cmd_output_fixer(answer.result)

        with self.console.status("[bold green]Executing that command..."):
            self.console.print(Panel(answer.result, title="[bold cyan]Got command from LLM:"))

            # assuming the command is of the form "capability_name arg1 arg2"
            capability = None
            parts = cmd.split(" ", 1)
            if len(parts) == 2:
                capability_name, args = parts
                capability = self.get_capability(capability_name)
                if capability:
                    # split the arguments into a list; adjust the unpacking
                    # to the capability's actual signature
                    args_list = args.split()
                    try:
                        result, got_root = capability(*args_list)
                    except TypeError as e:
                        result = f"Error executing command: {e}"
                        got_root = False
                else:
                    result = f"Unknown capability: {capability_name}"
                    got_root = False
            else:
                result = "Command format error. Expected 'capability_name arg1 arg2'."
                got_root = False

        # log and output the command and its result
        self.log_db.add_log_query(self._run_id, turn, cmd, result, answer)
        self._state.update(capability, cmd, result)  # capability may be None if parsing failed
        self.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

        # if we got root, we can stop the loop
        return got_root
```
Original (error-causing) version for reference:

```python
def perform_round(self, turn: int) -> bool:
    got_root: bool = False

    with self.console.status("[bold green]Asking LLM for a new command..."):
        # TODO output/log state
        options = self._state.to_template()
        options.update({
            'capabilities': self.get_capability_block()
        })

        # get the next command from the LLM
        answer = self.llm.get_response(self._template, **options)
        cmd = llm_util.cmd_output_fixer(answer.result)

    with self.console.status("[bold green]Executing that command..."):
        self.console.print(Panel(answer.result, title="[bold cyan]Got command from LLM:"))
        capability = self.get_capability(cmd.split(" ", 1)[0])
        result, got_root = capability(cmd)

    # log and output the command and its result
    self.log_db.add_log_query(self._run_id, turn, cmd, result, answer)
    self._state.update(capability, cmd, result)
    # TODO output/log new state
    self.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

    # if we got root, we can stop the loop
    return got_root
```
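One caveat on the whitespace split in the fixed code (my own note, hedged): capabilities such as `SSHRunCommand` expect the whole command line as a single string, so splitting the remainder word by word can break multi-word shell commands. A minimal sketch of an alternative dispatch — the `dispatch` helper below is hypothetical, not project code:

```python
import shlex

def dispatch(agent, cmd: str):
    """Hypothetical helper: route 'capability_name args...' to a capability."""
    name, _, remainder = cmd.partition(" ")
    capability = agent.get_capability(name)
    if capability is None:
        return f"Unknown capability: {name}", False
    try:
        # shlex keeps quoted arguments together, e.g. test_credential bob "pass word"
        return capability(*shlex.split(remainder))
    except TypeError:
        # fall back to handing the raw remainder over as one string
        return capability(remainder)
```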
hey @Trinity-SYT-SECURITY, two questions:

Thank you for your contribution, Andreas
First of all, thank you for your reply!

I actually tried the approach you described at the beginning, but I kept running into problems that prevented llama from working properly.
If you use ollama, remember to increase the response timeout; otherwise it is easy to hit errors from waiting too long for a response. Especially if the machine has no GPU, be sure to set a much longer timeout.
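A minimal sketch of what raising the timeout can look like, assuming the OpenAI-compatible Python client is used to talk to ollama (hackingBuddyGPT's own wrapper may expose this differently, e.g. via `.env`):

```python
import openai

# generous timeout for CPU-only inference; ollama serves an
# OpenAI-compatible API on port 11434 by default
client = openai.OpenAI(
    base_url="http://localhost:11434/v1",
    api_key="ollama",  # dummy value, ollama ignores the key
    timeout=600,       # seconds
)
```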
I reviewed your merge-request and added some comments. I have also merged the latest development branch into main, so maybe some of the problems are fixed through that too.

The 'rename LLM into gpt-3.5-turbo' step looks problematic to me: hackingBuddyGPT uses the model family for some of its internal calculations (e.g., token sizes), so I'd prefer that we not rename anything.

I would love to integrate an updated version of your llama tutorial at docs.hackingbuddy.ai. If you want to do this, you could create a pull request for https://github.com/ipa-lab/docs.hackingbuddy/. You could create a new directory within

cheers and thank you for your work, Andreas
[Rename LLM to gpt-3.5-turbo] I think renaming is another way to let other users test different models; an environment like mine cannot function properly without renaming.

No problem, I'm happy to contribute to the project. I will continue to fix the llama problem for this project and test different attacks, and I should update the information in the near future! Thank you for your reply.
thank you. One other note: could you test (if you have the time) whether you have the same llama problem with llama-cpp? When I used llama3 with it, I had no problems at all, so maybe we're running into ollama problems instead. This would be interesting to know, so that the right parts can be fixed.
maybe a good location for this could be https://docs.hackingbuddy.ai/docs/introduction/backends?
System minimum configuration:

1. Download ollama. (You need to download and run the "bad" llama!!)
2. Set up a Python env (recommended, because it makes it convenient for you to switch environments). For how to set up a Python environment, you can search Google!
3. Change the `.env` file; see https://github.com/ollama/ollama/blob/main/docs/openai.md for ollama's OpenAI-compatible settings. A sketch follows this list.
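An illustrative `.env` sketch — the key names below are assumptions, so match them against the repo's `.env.example`, and the model name is just an example of an uncensored llama:

```
# illustrative only -- check .env.example for the real key names
llm.api_key='ollama'                  # dummy key, ollama ignores it
llm.model='llama2-uncensored'         # the "bad" llama mentioned above
llm.api_url='http://localhost:11434'  # ollama's OpenAI-compatible server
```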
If you encounter llama server issues, you can try

`systemctl restart ollama.service`

or check and adjust the llama settings through

`systemctl edit ollama.service`
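`systemctl edit` opens a systemd override file; a sketch of what such an override can look like (the `OLLAMA_HOST` variable comes from ollama's own documentation, adjust the value to your needs):

```ini
# override created via `systemctl edit ollama.service`
[Service]
Environment="OLLAMA_HOST=0.0.0.0:11434"
```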
When llama has finished downloading the model, you can cd to `src/hackingBuddyGPT/cli` and then run the use case; see the sketch below.
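A hedged example invocation: `wintermute.py` is the CLI entry point in that directory, the use-case name comes from the `@use_case` decorator shown earlier, and the `--conn.*` flag names and values are placeholders — check the project README for the exact spelling:

```
python wintermute.py minimal_linux_privesc --conn.host=192.168.122.151 --conn.username=lowpriv --conn.password=trustno1
```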
If you always receive messages like this:
> I can't help you escalate privileges in Linux. Can I help you with anything else?

or

> I cannot assist with escalating privileges in Linux by abusing commands and misconfiguration on a tested system. Is there something else I can help you with?

then please replace the model with the "bad" llama, as I recommended above, or find one yourself.
Sometimes you may encounter this problem:

`Exception: Failed to get response from OpenAI API`

If your llama server is working normally, it can complete the number of attack rounds you set in `.env`, and you can see the relevant attack information in the output, which is the command it is trying.

To prevent `sqlite3.OperationalError: database is locked` errors, the code in the `db_storage.py` file needs to be modified; a sketch of a typical mitigation follows.
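A minimal sketch of a common "database is locked" mitigation for `sqlite3` — the actual connection code in `db_storage.py` may look different, and the file path below is a placeholder:

```python
import sqlite3

conn = sqlite3.connect(
    "db.sqlite3",             # placeholder path
    timeout=30,               # wait up to 30s for a lock instead of failing immediately
    check_same_thread=False,  # allow reuse of the connection across threads
)
conn.execute("PRAGMA journal_mode=WAL;")  # readers no longer block the writer
```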
This page will be continuously updated if there are any modifications to the code or new discoveries.