From 6e400e26d1362a5fe3d390e8e5386f572357054a Mon Sep 17 00:00:00 2001
From: millerh1
Date: Sat, 19 Apr 2025 15:20:04 +0000
Subject: [PATCH 1/2] Added support for exposing GPUs to docker code executor

---
 .../agents/_code_executor_agent.py           | 36 +++++++++++++++++++
 .../docker/_docker_code_executor.py          |  5 +++
 2 files changed, 41 insertions(+)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
index ae9312cc7d67..5d84534386b9 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
@@ -164,6 +164,42 @@ async def run_code_executor_agent() -> None:
 
         asyncio.run(run_code_executor_agent())
 
+    In this example, we show how to set up a `CodeExecutorAgent` that uses a
+    :py:class:`~docker.types.DeviceRequest` to expose a GPU to the container for CUDA-accelerated code execution.
+
+    .. code-block:: python
+
+        import asyncio
+        from autogen_agentchat.agents import CodeExecutorAgent
+        from autogen_agentchat.messages import TextMessage
+        from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
+        from autogen_core import CancellationToken
+        from docker.types import DeviceRequest
+
+        async def run_code_executor_agent() -> None:
+            # Create a code executor agent that uses a Docker container to execute code.
+            code_executor = DockerCommandLineCodeExecutor(work_dir="coding", device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])])
+            await code_executor.start()
+            code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+
+            # Display the GPU information
+            task = TextMessage(
+                content='''Here is some code
+        ```bash
+        nvidia-smi
+        ```
+        ''',
+                source="user",
+            )
+            response = await code_executor_agent.on_messages([task], CancellationToken())
+            print(response.chat_message)
+
+            # Stop the code executor.
+            await code_executor.stop()
+
+
+        asyncio.run(run_code_executor_agent())
+
     In the following example, we show how to setup `CodeExecutorAgent` without `model_client` parameter for executing code blocks generated by other agents in a group chat using :py:class:`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`
 
     .. code-block:: python
diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
index fda23da0b349..6e2c4f420443 100644
--- a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
+++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
@@ -14,6 +14,7 @@
 from hashlib import sha256
 from pathlib import Path
 from typing import Any, Callable, ClassVar, Dict, List, Optional, ParamSpec, Tuple, Union
+from docker.types import DeviceRequest
 
 from autogen_core import CancellationToken, Component
 from autogen_core.code_executor import (
@@ -116,6 +117,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor, Component[DockerCommandLineCod
         stop_container (bool, optional): If true, will automatically stop the
             container when stop is called, when the context manager exits or when
             the Python process exits with atext. Defaults to True.
+        device_requests (Optional[List[DeviceRequest]], optional): A list of device request instances to add to the container for exposing GPUs (e.g., [docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])]) Defaults to None.
         functions (List[Union[FunctionWithRequirements[Any, A], Callable[..., Any]]]): A list of functions that are available to the code executor. Default is an empty list.
         functions_module (str, optional): The name of the module that will be created to store the functions. Defaults to "functions".
         extra_volumes (Optional[Dict[str, Dict[str, str]]], optional): A dictionary of extra volumes (beyond the work_dir) to mount to the container;
@@ -163,6 +165,7 @@ def __init__(
         bind_dir: Optional[Union[Path, str]] = None,
         auto_remove: bool = True,
         stop_container: bool = True,
+        device_requests: Optional[List[DeviceRequest]] = None,
         functions: Sequence[
             Union[
                 FunctionWithRequirements[Any, A],
@@ -229,6 +232,7 @@ def __init__(
         self._extra_hosts = extra_hosts if extra_hosts is not None else {}
         self._init_command = init_command
         self._delete_tmp_files = delete_tmp_files
+        self._device_requests = device_requests
 
         # Setup could take some time so we intentionally wait for the first code block to do it.
         if len(functions) > 0:
@@ -488,6 +492,7 @@ async def start(self) -> None:
             volumes={str(self.bind_dir.resolve()): {"bind": "/workspace", "mode": "rw"}, **self._extra_volumes},
             working_dir="/workspace",
             extra_hosts=self._extra_hosts,
+            device_requests=self._device_requests,
         )
 
         await asyncio.to_thread(self._container.start)

From 9c5651e7151fe8baf9aa7b46be2573cc68aaa2d5 Mon Sep 17 00:00:00 2001
From: millerh1
Date: Sat, 19 Apr 2025 16:34:26 +0000
Subject: [PATCH 2/2] Passing checks now.

---
 .../src/autogen_agentchat/agents/_code_executor_agent.py | 5 ++++-
 .../code_executors/docker/_docker_code_executor.py       | 5 +++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
index 5d84534386b9..8006164e5dec 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py
@@ -176,9 +176,12 @@ async def run_code_executor_agent() -> None:
         from autogen_core import CancellationToken
         from docker.types import DeviceRequest
 
+
         async def run_code_executor_agent() -> None:
             # Create a code executor agent that uses a Docker container to execute code.
-            code_executor = DockerCommandLineCodeExecutor(work_dir="coding", device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])])
+            code_executor = DockerCommandLineCodeExecutor(
+                work_dir="coding", device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])]
+            )
             await code_executor.start()
             code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
 
diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
index 6e2c4f420443..aaef37c0b2b5 100644
--- a/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
+++ b/python/packages/autogen-ext/src/autogen_ext/code_executors/docker/_docker_code_executor.py
@@ -14,7 +14,6 @@
 from hashlib import sha256
 from pathlib import Path
 from typing import Any, Callable, ClassVar, Dict, List, Optional, ParamSpec, Tuple, Union
-from docker.types import DeviceRequest
 
 from autogen_core import CancellationToken, Component
 from autogen_core.code_executor import (
@@ -26,6 +25,8 @@
 from pydantic import BaseModel
 from typing_extensions import Self
 
+from docker.types import DeviceRequest
+
 from .._common import (
     CommandLineCodeResult,
     build_python_functions_file,
@@ -117,7 +118,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor, Component[DockerCommandLineCod
         stop_container (bool, optional): If true, will automatically stop the
             container when stop is called, when the context manager exits or when
             the Python process exits with atext. Defaults to True.
-        device_requests (Optional[List[DeviceRequest]], optional): A list of device request instances to add to the container for exposing GPUs (e.g., [docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])]) Defaults to None.
+        device_requests (Optional[List[DeviceRequest]], optional): A list of device request instances to add to the container for exposing GPUs (e.g., [docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])]). Defaults to None.
         functions (List[Union[FunctionWithRequirements[Any, A], Callable[..., Any]]]): A list of functions that are available to the code executor. Default is an empty list.
         functions_module (str, optional): The name of the module that will be created to store the functions. Defaults to "functions".
         extra_volumes (Optional[Dict[str, Dict[str, str]]], optional): A dictionary of extra volumes (beyond the work_dir) to mount to the container;