Fix/torch hub fix #4

Merged · 2 commits · Feb 29, 2024
6 changes: 4 additions & 2 deletions vocode/streaming/input_device/silero_vad.py
@@ -22,15 +22,17 @@ def _load_model(self, use_onnx: bool = False) -> torch.nn.Module:
                 repo_or_dir='silero-vad',
                 model='silero_vad',
                 source='local',
-                onnx=use_onnx
+                onnx=use_onnx,
+                trust_repo=True
             )
         except FileNotFoundError:
             self.logger.warning("Could not find local VAD model, downloading from GitHub!")
             model, _ = torch.hub.load(
                 repo_or_dir='snakers4/silero-vad',
                 model='silero_vad',
                 source='github',
                 onnx=use_onnx,
-                onnx=use_onnx
+                onnx=use_onnx,
+                trust_repo=True
             )
         return model
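For context, here is a minimal self-contained sketch of the loading logic this hunk patches. It assumes torch >= 1.12, where torch.hub.load accepts the trust_repo parameter; without it, newer PyTorch versions pause on an interactive prompt asking whether to trust the hub repo, which hangs non-interactive deployments. The standalone load_silero_vad function and module-level logger are illustrative stand-ins for the class method in the diff.

    # Sketch of the patched fallback: try a local clone of silero-vad first,
    # then download from GitHub. trust_repo=True (torch >= 1.12) suppresses
    # the interactive "trust this repo?" prompt in both paths.
    import logging

    import torch

    logger = logging.getLogger(__name__)


    def load_silero_vad(use_onnx: bool = False) -> torch.nn.Module:
        try:
            model, _ = torch.hub.load(
                repo_or_dir='silero-vad',
                model='silero_vad',
                source='local',
                onnx=use_onnx,
                trust_repo=True,
            )
        except FileNotFoundError:
            logger.warning("Could not find local VAD model, downloading from GitHub!")
            model, _ = torch.hub.load(
                repo_or_dir='snakers4/silero-vad',
                model='silero_vad',
                source='github',
                onnx=use_onnx,
                trust_repo=True,
            )
        return model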
7 changes: 4 additions & 3 deletions vocode/streaming/streaming_conversation.py
@@ -320,6 +320,7 @@ async def process(self, item: InterruptibleAgentResponseEvent[AgentResponse]):
                 is_interruptible=item.is_interruptible,
                 agent_response_tracker=item.agent_response_tracker,
             )
+            self.conversation.mark_last_agent_response()
         except asyncio.CancelledError:
             pass

@@ -385,7 +386,6 @@ async def process(
                 await self.conversation.terminate()
             except asyncio.TimeoutError:
                 pass
-            self.conversation.mark_last_agent_response()
         except asyncio.CancelledError:
             pass

@@ -712,6 +712,7 @@ async def send_speech_to_output(
                 "Sent chunk {} with size {}".format(chunk_idx, len(chunk_result.chunk))
             )
             self.mark_last_action_timestamp()
+            self.mark_last_agent_response()
             chunk_idx += 1
             seconds_spoken += seconds_per_chunk
             if transcript_message:

@@ -790,7 +791,7 @@ async def check_if_human_should_be_prompted(self):
             if self.last_agent_response and self.last_final_transcript_from_human:
                 last_human_touchpoint = time.time() - self.last_final_transcript_from_human
                 last_agent_touchpoint = time.time() - self.last_agent_response
-                if last_human_touchpoint >= reengage_timeout and last_agent_touchpoint >= reengage_timeout:
+                if (last_human_touchpoint >= reengage_timeout) and (last_agent_touchpoint >= reengage_timeout):
                     reengage_statement = random.choice(reengage_options)
                     self.logger.debug(f"Prompting user with {reengage_statement}: no interaction has happened in {reengage_timeout} seconds")
                     self.chunk_size = (

@@ -812,7 +813,7 @@ async def check_if_human_should_be_prompted(self):
                         agent_response_tracker=asyncio.Event(),
                     )
                     self.mark_last_agent_response()
-                    await asyncio.sleep(1)
+                    await asyncio.sleep(2.5)
                 else:
                     await asyncio.sleep(1)
         self.logger.debug("stopped check if human should be prompted")
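To see how these hunks fit together, here is a stripped-down, runnable sketch of the re-engagement loop. The attribute and method names mirror the diff; the ReengagementLoop class, its constructor arguments, and the send_prompt helper are hypothetical scaffolding standing in for StreamingConversation's real plumbing. The key behavior changes: mark_last_agent_response now fires as audio chunks are sent (so the idle timer tracks actual speech), and the loop backs off for 2.5 s instead of 1 s after prompting, giving the prompt time to play before the timestamps are re-checked.

    # Sketch only: class, constructor args, and send_prompt() are assumed,
    # not vocode's actual API. The timing logic mirrors the diff above.
    import asyncio
    import random
    import time
    from typing import List, Optional


    class ReengagementLoop:
        def __init__(self, reengage_timeout: float, reengage_options: List[str]):
            self.reengage_timeout = reengage_timeout
            self.reengage_options = reengage_options
            self.last_agent_response: Optional[float] = None
            self.last_final_transcript_from_human: Optional[float] = None
            self.active = True

        def mark_last_agent_response(self) -> None:
            # After this PR, called as each audio chunk is sent, so the idle
            # timer restarts while the agent is still speaking.
            self.last_agent_response = time.time()

        async def check_if_human_should_be_prompted(self) -> None:
            while self.active:
                if self.last_agent_response and self.last_final_transcript_from_human:
                    last_human_touchpoint = time.time() - self.last_final_transcript_from_human
                    last_agent_touchpoint = time.time() - self.last_agent_response
                    if (last_human_touchpoint >= self.reengage_timeout) and (
                        last_agent_touchpoint >= self.reengage_timeout
                    ):
                        # Both sides idle past the timeout: re-engage the human.
                        await self.send_prompt(random.choice(self.reengage_options))
                        self.mark_last_agent_response()
                        await asyncio.sleep(2.5)  # longer back-off after prompting (was 1 s)
                    else:
                        await asyncio.sleep(1)
                else:
                    await asyncio.sleep(1)

        async def send_prompt(self, statement: str) -> None:
            # Hypothetical stand-in: the real code enqueues an agent response.
            print(f"agent: {statement}")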