You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
Traceback (most recent call last):
File "/home/machinelawing/LLM/LaWGPT/utils/callbacks.py", line 47, in gentask
ret = self.mfunc(callback=_callback, **self.kwargs)
File "/home/machinelawing/LLM/LaWGPT/webui.py", line 140, in generate_with_callback
model.generate(**kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/generation/utils.py", line 1538, in generate
return self.greedy_search(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/generation/utils.py", line 2362, in greedy_search
outputs = self(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 691, in forward
outputs = self.model(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 579, in forward
layer_outputs = decoder_layer(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 291, in forward
hidden_states = self.input_layernorm(hidden_states)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 88, in forward
return (self.weight * hidden_states).to(input_dtype)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
Traceback (most recent call last):
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/routes.py", line 439, in run_predict
output = await app.get_blocks().process_api(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/blocks.py", line 1384, in process_api
result = await self.call_function(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/blocks.py", line 1103, in call_function
prediction = await utils.async_iteration(iterator)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 343, in async_iteration
return await iterator.__anext__()
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 336, in __anext__
return await anyio.to_thread.run_sync(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 319, in run_sync_iterator_async
return next(iterator)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 688, in gen_wrapper
yield from f(*args, **kwargs)
File "/home/machinelawing/LLM/LaWGPT/webui.py", line 156, in evaluate
print(decoded_output)
UnboundLocalError: local variable 'decoded_output' referenced before assignment
The text was updated successfully, but these errors were encountered:
bash scripts/webui.sh
Traceback (most recent call last):
File "/home/machinelawing/LLM/LaWGPT/utils/callbacks.py", line 47, in gentask
ret = self.mfunc(callback=_callback, **self.kwargs)
File "/home/machinelawing/LLM/LaWGPT/webui.py", line 140, in generate_with_callback
model.generate(**kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/generation/utils.py", line 1538, in generate
return self.greedy_search(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/generation/utils.py", line 2362, in greedy_search
outputs = self(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 691, in forward
outputs = self.model(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 579, in forward
layer_outputs = decoder_layer(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 291, in forward
hidden_states = self.input_layernorm(hidden_states)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 88, in forward
return (self.weight * hidden_states).to(input_dtype)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
Traceback (most recent call last):
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/routes.py", line 439, in run_predict
output = await app.get_blocks().process_api(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/blocks.py", line 1384, in process_api
result = await self.call_function(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/blocks.py", line 1103, in call_function
prediction = await utils.async_iteration(iterator)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 343, in async_iteration
return await iterator.__anext__()
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 336, in __anext__
return await anyio.to_thread.run_sync(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 319, in run_sync_iterator_async
return next(iterator)
File "/home/machinelawing/LLM/lawgpt_env/lib/python3.10/site-packages/gradio/utils.py", line 688, in gen_wrapper
yield from f(*args, **kwargs)
File "/home/machinelawing/LLM/LaWGPT/webui.py", line 156, in evaluate
print(decoded_output)
UnboundLocalError: local variable 'decoded_output' referenced before assignment
The text was updated successfully, but these errors were encountered: