'OpenAIMultiModal' object has no attribute '__pydantic_private__', LaVague 1.1.19, Python 3.12.7 #631

Open · pedron1tor opened this issue Dec 25, 2024 · 0 comments

Describe the bug
The basic demo workflow fails when using Groq as the LLM: constructing ActionEngine raises an AttributeError (full traceback below).
To Reproduce
Versions:
  pip: 24.3.1
  Python: 3.12.7
  LaVague: 1.1.19
Steps to reproduce the behavior:

  1. Install the requirements:
    pip install llama-index-embeddings-huggingface
    pip install llama-index-llms-groq
    pip install lavague
    Full requirements.txt: https://gist.github.com/pedron1tor/82e4860195a3f485d835986e8dc88499
  2. Run the following code:
import lavague
from lavague.drivers.selenium import SeleniumDriver
from lavague.core import ActionEngine, WorldModel
from lavague.core.agents import WebAgent
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq
driver = SeleniumDriver(headless=True)
llm = Groq(model="mixtral-8x7b-32768")

embedding = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

action_engine = ActionEngine(driver=driver, llm=llm, embedding=embedding)
world_model = WorldModel()

# Create Web Agent
agent = WebAgent(world_model, action_engine)

# Set URL
agent.get("https://huggingface.co/docs")

# Launch the agent in the Agent Gradio Demo mode
agent.demo("Go on the quicktour of PEFT") 

Expected behavior
I would expect the code to launch the agent demo and run the instruction; as far as I can tell, nothing in the snippet above is incorrect.

Error traceback

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[2], line 14
      8 llm = Groq(model="mixtral-8x7b-32768")
     10 embedding = HuggingFaceEmbedding(
     11     model_name="BAAI/bge-small-en-v1.5"
     12 )
---> 14 action_engine = ActionEngine(driver=driver, llm=llm, embedding=embedding)
     15 world_model = WorldModel()
     17 # Create Web Agent

File ~/venv/lib/python3.12/site-packages/lavague/core/action_engine.py:84, in ActionEngine.__init__(self, driver, navigation_engine, python_engine, navigation_control, llm, embedding, retriever, prompt_template, extractor, time_between_actions, n_attempts, logger)
     72     navigation_engine = NavigationEngine(
     73         driver=driver,
     74         llm=llm,
   (...)
     81         embedding=embedding,
     82     )
     83 if python_engine is None:
---> 84     python_engine = PythonEngine(driver, llm, embedding)
     85 if navigation_control is None:
     86     navigation_control = NavigationControl(
     87         driver,
     88         time_between_actions=time_between_actions,
     89         navigation_engine=navigation_engine,
     90     )

File ~/venv/lib/python3.12/site-packages/lavague/core/python_engine.py:66, in PythonEngine.__init__(self, driver, llm, embedding, logger, clean_html, ocr_mm_llm, ocr_llm, display, batch_size, confidence_threshold, temp_screenshots_path, n_search_attemps)
     64 self.logger = logger
     65 self.display = display
---> 66 self.ocr_mm_llm = ocr_mm_llm or OpenAIMultiModal(
     67     model="gpt-4o-mini", temperature=DEFAULT_TEMPERATURE
     68 )
     69 self.ocr_llm = ocr_llm or self.llm
     70 self.batch_size = batch_size

File ~/venv/lib/python3.12/site-packages/llama_index/multi_modal_llms/openai/base.py:107, in OpenAIMultiModal.__init__(self, model, temperature, max_new_tokens, additional_kwargs, context_window, max_retries, timeout, image_detail, api_key, api_base, api_version, messages_to_prompt, completion_to_prompt, callback_manager, default_headers, http_client, **kwargs)
     87 def __init__(
     88     self,
     89     model: str = "gpt-4-vision-preview",
   (...)
    105     **kwargs: Any,
    106 ) -> None:
--> 107     self._messages_to_prompt = messages_to_prompt or generic_messages_to_prompt
    108     self._completion_to_prompt = completion_to_prompt or (lambda x: x)
    109     api_key, api_base, api_version = resolve_openai_credentials(
    110         api_key=api_key,
    111         api_base=api_base,
    112         api_version=api_version,
    113     )

File ~/venv/lib/python3.12/site-packages/pydantic/main.py:901, in BaseModel.__setattr__(self, name, value)
    896     raise AttributeError(
    897         f'{name!r} is a ClassVar of `{self.__class__.__name__}` and cannot be set on an instance. '
    898         f'If you want to set a value on the class, use `{self.__class__.__name__}.{name} = value`.'
    899     )
    900 elif not _fields.is_valid_field_name(name):
--> 901     if self.__pydantic_private__ is None or name not in self.__private_attributes__:
    902         _object_setattr(self, name, value)
    903     else:

File ~/venv/lib/python3.12/site-packages/pydantic/main.py:889, in BaseModel.__getattr__(self, item)
    887 else:
    888     if hasattr(self.__class__, item):
--> 889         return super().__getattribute__(item)  # Raises AttributeError if appropriate
    890     else:
    891         # this is the current error
    892         raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}')

AttributeError: 'OpenAIMultiModal' object has no attribute '__pydantic_private__'
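
Additional context

Reading the traceback, the failure does not look Groq-specific: PythonEngine.__init__ constructs a default OpenAIMultiModal(model="gpt-4o-mini", ...) whenever no ocr_mm_llm is supplied, and that constructor assigns self._messages_to_prompt before BaseModel.__init__ has run. Under pydantic 2.x, private attributes live in __pydantic_private__, which only exists once BaseModel.__init__ has executed, hence the AttributeError. The snippet below is a minimal sketch of that failure mode (my own example, independent of LaVague and llama-index, assuming the pydantic 2.x behaviour shown in the traceback):

from pydantic import BaseModel

class Broken(BaseModel):
    # Mimics the pattern in OpenAIMultiModal.__init__ shown above.
    def __init__(self, **kwargs):
        # Assigning an underscore-prefixed attribute before super().__init__()
        # sends pydantic's __setattr__ looking for __pydantic_private__, which
        # does not exist yet on the instance, so the same AttributeError is raised.
        self._messages_to_prompt = None
        super().__init__(**kwargs)

Broken()  # AttributeError: 'Broken' object has no attribute '__pydantic_private__'

If that reading is correct, this is most likely a version mismatch between the installed llama-index-multi-modal-llms-openai package and pydantic / llama-index-core rather than a problem in the LaVague code path for Groq; a release of the multi-modal package whose OpenAIMultiModal.__init__ calls super().__init__() before assigning private attributes would presumably avoid it, but I have not verified which versions do.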