@@ -360,7 +360,7 @@ def _prompt_to_llm_inputs(
360360
361361 return token_inputs (
362362 prompt_token_ids = prompt_token_ids ,
363- prompt_embeds = tokens_content .get (' prompt_embeds' ),
363+ prompt_embeds = tokens_content .get (" prompt_embeds" ),
364364 token_type_ids = token_type_ids ,
365365 multi_modal_data = multi_modal_data ,
366366 mm_processor_kwargs = mm_processor_kwargs ,
@@ -390,7 +390,7 @@ def _prompt_to_llm_inputs(
390390 return token_inputs (
391391 prompt = prompt_text ,
392392 prompt_token_ids = prompt_token_ids ,
393- prompt_embeds = text_content .get (' prompt_embeds' ),
393+ prompt_embeds = text_content .get (" prompt_embeds" ),
394394 multi_modal_data = multi_modal_data ,
395395 mm_processor_kwargs = mm_processor_kwargs ,
396396 )
@@ -436,7 +436,7 @@ async def _prompt_to_llm_inputs_async(
436436
437437 return token_inputs (
438438 prompt_token_ids = prompt_token_ids ,
439- prompt_embeds = tokens_content .get (' prompt_embeds' ),
439+ prompt_embeds = tokens_content .get (" prompt_embeds" ),
440440 multi_modal_data = multi_modal_data ,
441441 mm_processor_kwargs = mm_processor_kwargs ,
442442 )
@@ -465,7 +465,7 @@ async def _prompt_to_llm_inputs_async(
465465 return token_inputs (
466466 prompt = prompt_text ,
467467 prompt_token_ids = prompt_token_ids ,
468- prompt_embeds = text_content .get (' prompt_embeds' ),
468+ prompt_embeds = text_content .get (" prompt_embeds" ),
469469 multi_modal_data = multi_modal_data ,
470470 mm_processor_kwargs = mm_processor_kwargs ,
471471 )
0 commit comments