82 changes: 82 additions & 0 deletions src/app/endpoints/rlsapi_v1.py
@@ -0,0 +1,82 @@
"""Handler for RHEL Lightspeed rlsapi v1 REST API endpoints.

This module provides the /infer endpoint for stateless inference requests
from the RHEL Lightspeed Command Line Assistant (CLA).
"""

import logging
from typing import Annotated, Any

from fastapi import APIRouter, Depends

from authentication import get_auth_dependency
from authentication.interface import AuthTuple
from authorization.middleware import authorize
from models.config import Action
from models.responses import (
ForbiddenResponse,
UnauthorizedResponse,
UnprocessableEntityResponse,
)
from models.rlsapi.requests import RlsapiV1InferRequest
from models.rlsapi.responses import RlsapiV1InferData, RlsapiV1InferResponse
from utils.suid import get_suid

logger = logging.getLogger(__name__)
router = APIRouter(tags=["rlsapi-v1"])


infer_responses: dict[int | str, dict[str, Any]] = {
200: RlsapiV1InferResponse.openapi_response(),
401: UnauthorizedResponse.openapi_response(
examples=["missing header", "missing token"]
),
403: ForbiddenResponse.openapi_response(examples=["endpoint"]),
422: UnprocessableEntityResponse.openapi_response(),
}


@router.post("/infer", responses=infer_responses)
@authorize(Action.RLSAPI_V1_INFER)
async def infer_endpoint(
infer_request: RlsapiV1InferRequest,
auth: Annotated[AuthTuple, Depends(get_auth_dependency())],
) -> RlsapiV1InferResponse:
"""Handle rlsapi v1 /infer requests for stateless inference.

This endpoint serves requests from the RHEL Lightspeed Command Line Assistant (CLA).

Accepts a question with optional context (stdin, attachments, terminal output,
system info) and returns an LLM-generated response.

Args:
infer_request: The inference request containing question and context.
auth: Authentication tuple from the configured auth provider.

Returns:
RlsapiV1InferResponse containing the generated response text and request ID.
"""
# Authentication enforced by get_auth_dependency(), authorization by @authorize decorator.
_ = auth

# Generate unique request ID
request_id = get_suid()

logger.info("Processing rlsapi v1 /infer request %s", request_id)

# Combine all input sources (question, stdin, attachments, terminal)
input_source = infer_request.get_input_source()
logger.debug("Combined input source length: %d", len(input_source))

# NOTE(major): Placeholder until we wire up the LLM integration.
response_text = (
"Inference endpoint is functional. "
"LLM integration will be added in a subsequent update."
)

return RlsapiV1InferResponse(
data=RlsapiV1InferData(
text=response_text,
request_id=request_id,
)
)
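For reference, here is a minimal sketch of how a client such as the CLA might call this endpoint once the router is mounted. The host, path prefix, and bearer-token header are assumptions for illustration only; the payload and response field names follow the request/response models exercised in the unit tests below.

```python
# Sketch only: base URL, mount prefix, and auth scheme are assumptions,
# not confirmed by this diff.
import httpx

payload = {
    "question": "Why did this command fail?",
    "context": {
        "stdin": "some piped input",
        "terminal": {"output": "bash: command not found"},
        "systeminfo": {"os": "RHEL", "version": "9.3", "arch": "x86_64"},
    },
}

resp = httpx.post(
    "http://localhost:8080/infer",                 # hypothetical host and prefix
    json=payload,
    headers={"Authorization": "Bearer <token>"},   # auth scheme is an assumption
    timeout=30.0,
)
resp.raise_for_status()

# RlsapiV1InferResponse serializes as {"data": {"text": ..., "request_id": ...}}.
data = resp.json()["data"]
print(data["request_id"], data["text"])
```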
74 changes: 74 additions & 0 deletions tests/unit/app/endpoints/test_rlsapi_v1.py
@@ -0,0 +1,74 @@
"""Unit tests for the rlsapi v1 /infer REST API endpoint."""

import pytest
from pydantic import ValidationError
from pytest_mock import MockerFixture

from app.endpoints.rlsapi_v1 import infer_endpoint
from authentication.interface import AuthTuple
from models.rlsapi.requests import (
RlsapiV1Attachment,
RlsapiV1Context,
RlsapiV1InferRequest,
RlsapiV1SystemInfo,
RlsapiV1Terminal,
)
from models.rlsapi.responses import RlsapiV1InferResponse
from tests.unit.utils.auth_helpers import mock_authorization_resolvers
from utils.suid import check_suid

MOCK_AUTH: AuthTuple = ("test_user_id", "test_user", True, "test_token")


@pytest.mark.asyncio
async def test_infer_minimal_request(mocker: MockerFixture) -> None:
"""Test /infer endpoint returns valid response with UUID request_id."""
mock_authorization_resolvers(mocker)
request = RlsapiV1InferRequest(question="How do I list files?")

response = await infer_endpoint(infer_request=request, auth=MOCK_AUTH)

assert isinstance(response, RlsapiV1InferResponse)
assert response.data.text
# Verify request_id is valid SUID
assert check_suid(response.data.request_id)


@pytest.mark.asyncio
async def test_infer_full_context_request(mocker: MockerFixture) -> None:
"""Test /infer endpoint handles full context (stdin, attachments, terminal)."""
mock_authorization_resolvers(mocker)
request = RlsapiV1InferRequest(
question="Why did this command fail?",
context=RlsapiV1Context(
stdin="some piped input",
attachments=RlsapiV1Attachment(contents="key=value", mimetype="text/plain"),
terminal=RlsapiV1Terminal(output="bash: command not found"),
systeminfo=RlsapiV1SystemInfo(os="RHEL", version="9.3", arch="x86_64"),
),
)

response = await infer_endpoint(infer_request=request, auth=MOCK_AUTH)

assert isinstance(response, RlsapiV1InferResponse)
assert response.data.text
assert response.data.request_id


@pytest.mark.asyncio
async def test_infer_generates_unique_request_ids(mocker: MockerFixture) -> None:
"""Test that each /infer call generates a unique request_id."""
mock_authorization_resolvers(mocker)
request = RlsapiV1InferRequest(question="How do I list files?")

response1 = await infer_endpoint(infer_request=request, auth=MOCK_AUTH)
response2 = await infer_endpoint(infer_request=request, auth=MOCK_AUTH)

assert response1.data.request_id != response2.data.request_id


@pytest.mark.parametrize("invalid_question", ["", " ", "\t\n"])
def test_infer_rejects_invalid_question(invalid_question: str) -> None:
"""Test that empty or whitespace-only questions are rejected."""
with pytest.raises(ValidationError):
RlsapiV1InferRequest(question=invalid_question)
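The final parametrized test asserts that empty or whitespace-only questions fail model validation. The request model itself is not part of this diff; a minimal sketch of how that constraint could be expressed (assuming Pydantic v2; the class and validator names here are hypothetical) looks like this:

```python
# Sketch only: RlsapiV1InferRequest is defined elsewhere in the repo; this
# illustrates one way the whitespace-only rejection tested above could work.
from pydantic import BaseModel, field_validator


class InferRequestSketch(BaseModel):
    question: str

    @field_validator("question")
    @classmethod
    def reject_blank_question(cls, value: str) -> str:
        # Matches the parametrized cases: "", " ", and "\t\n" all raise ValidationError.
        if not value.strip():
            raise ValueError("question must not be empty or whitespace-only")
        return value
```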