Skip to content

Commit 1a72372

Browse files
committed
move '/server_info' endpoint under the `if envs.VLLM_SERVER_DEV_MODE:` guard
Signed-off-by: Xihui Cang <xihuicang@gmail.com>
1 parent df44ffc commit 1a72372

File tree

1 file changed

+8
-9
lines changed

1 file changed

+8
-9
lines changed

vllm/entrypoints/openai/api_server.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -479,15 +479,6 @@ async def show_version():
479479
return JSONResponse(content=ver)
480480

481481

482-
@router.get("/server_info")
483-
async def show_server_info():
484-
if _global_state is None:
485-
server_info = {"vllm_config": "Vllm Config not available"}
486-
else:
487-
server_info = {"vllm_config": str(_global_state.vllmconfig)}
488-
return JSONResponse(content=server_info)
489-
490-
491482
@router.post("/v1/chat/completions",
492483
dependencies=[Depends(validate_json_request)])
493484
@with_cancellation
@@ -753,6 +744,14 @@ async def is_sleeping(raw_request: Request):
753744
logger.info("check whether the engine is sleeping")
754745
is_sleeping = await engine_client(raw_request).is_sleeping()
755746
return JSONResponse(content={"is_sleeping": is_sleeping})
747+
748+
@router.get("/server_info")
749+
async def show_server_info():
750+
if _global_state is None:
751+
server_info = {"vllm_config": "Vllm Config not available"}
752+
else:
753+
server_info = {"vllm_config": str(_global_state.vllmconfig)}
754+
return JSONResponse(content=server_info)
756755

757756

758757
@router.post("/invocations", dependencies=[Depends(validate_json_request)])

0 commit comments

Comments
 (0)