12 changes: 12 additions & 0 deletions nilai-attestation/src/nilai_attestation/app.py
@@ -3,6 +3,18 @@
from fastapi import FastAPI
from nilai_attestation.routers import private, public

# FastAPI app and serving


import logging

logging.getLogger("nv_attestation_sdk").setLevel(logging.WARNING)
logging.getLogger("sdk-logger").setLevel(logging.WARNING)
logging.getLogger("sdk-console").setLevel(logging.WARNING)
logging.getLogger("sdk-file").setLevel(logging.WARNING)
logging.getLogger("gpu-verifier-event").setLevel(logging.WARNING)
logging.getLogger("gpu-verifier-info").setLevel(logging.WARNING)


description = """
An AI model serving platform powered by secure, confidential computing.
@@ -0,0 +1,81 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Attester: Generate an attestation token from local evidence
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation # type: ignore

import subprocess
from functools import lru_cache
import logging

logger = logging.getLogger(__name__)

NRAS_URL = "https://nras.attestation.nvidia.com/v3/attest/gpu"
OCSP_URL = "https://ocsp.ndis.nvidia.com/"
RIM_URL = "https://rim.attestation.nvidia.com/v1/rim/"


@lru_cache(maxsize=1)
def is_nvidia_gpu_available() -> bool:
"""Check if an NVIDIA GPU with compute capability is available in the system and cache the result.

Returns:
bool: True if an NVIDIA GPU is available and confidential compute is ON, False otherwise.
"""
try:
# Run the command and capture its output
result = subprocess.run(
["nvidia-smi", "conf-compute", "-f"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
text=True, # ensures stdout/stderr are strings not bytes
)

output = result.stdout.strip()
if "ON" in output:
return True
else:
return False

except (subprocess.CalledProcessError, FileNotFoundError):
return False


@lru_cache(maxsize=1)
def get_client() -> attestation.Attestation:
"""Create and configure the attestation client with appropriate verifiers.

This function initializes an attestation client and configures it based on the availability
of an NVIDIA GPU. If a GPU is available, a remote verifier is added. Otherwise, a local
verifier is configured.

Returns:
attestation.Attestation: A configured attestation client instance.
"""
# Create and configure the attestation client.
client = attestation.Attestation()
client.set_name("nilai-attestation-module")
logger.info("Checking if NVIDIA GPU is available")

if is_nvidia_gpu_available():
logger.info("NVIDIA GPU is available")
# Configure the remote verifier.
# WARNING: This call configures the verifier at a global level; it must only be done once.
client.add_verifier(
attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, ""
)
else:
logger.info("NVIDIA GPU is not available")
# WARNING: This call configures the verifier at a global level; it must only be done once.
client.add_verifier(
attestation.Devices.GPU,
attestation.Environment.LOCAL,
"",
"",
OCSP_URL,
RIM_URL,
)
return client
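
Because both helpers above are wrapped in lru_cache(maxsize=1), the nvidia-smi probe runs only once and every caller shares a single attestation client whose verifier has been registered exactly once, which is what the WARNING comments rely on. A minimal usage sketch (the module path is taken from the import that appears later in this diff):

from nilai_attestation.attestation.nvtrust import get_client, is_nvidia_gpu_available

# First call probes nvidia-smi and builds the client; subsequent calls return the cached instance.
client = get_client()
assert get_client() is client  # one shared client, verifier registered only once

print("Confidential compute GPU available:", is_nvidia_gpu_available())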
@@ -4,49 +4,15 @@
# Attester: Generate an attestation token from local evidence
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation # type: ignore
import subprocess
from functools import lru_cache
from nilai_attestation.attestation.nvtrust import is_nvidia_gpu_available, get_client
import base64
from nilai_common import Nonce, NVAttestationToken
import logging

from nilai_common.logger import setup_logger
logger = logging.getLogger(__name__)

logger = setup_logger(__name__)

NRAS_URL = "https://nras.attestation.nvidia.com/v3/attest/gpu"
OCSP_URL = "https://ocsp.ndis.nvidia.com/"
RIM_URL = "https://rim.attestation.nvidia.com/v1/rim/"


@lru_cache(maxsize=1)
def is_nvidia_gpu_available() -> bool:
"""Check if an NVIDIA GPU with compute capability is available in the system and cache the result.

Returns:
bool: True if an NVIDIA GPU is available and compute capability is ON, False otherwise.
"""
try:
# Run the command and capture its output
result = subprocess.run(
["nvidia-smi", "conf-compute", "-f"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
text=True, # ensures stdout/stderr are strings not bytes
)

output = result.stdout.strip()
if "ON" in output:
return True
else:
return False

except (subprocess.CalledProcessError, FileNotFoundError):
return False


def nv_attest(nonce: Nonce, name: str = "thisNode1") -> NVAttestationToken:
def nv_attest(nonce: Nonce) -> NVAttestationToken:
"""Generate an attestation token from local evidence.

Args:
@@ -55,41 +55,24 @@ def nv_attest(nonce: Nonce, name: str = "thisNode1") -> NVAttestationToken:
Returns:
NVAttestationToken: The attestation token response
"""
# Create and configure the attestation client.
client = attestation.Attestation()
client.set_name(name)
client = get_client()
client.set_nonce(nonce)

logger.info("Checking if NVIDIA GPU is available")
evidence_list = []
if is_nvidia_gpu_available():
logger.info("NVIDIA GPU is available")
# Configure the remote verifier.
client.add_verifier(
attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, ""
)
# Collect evidence and perform attestation.
evidence_list = client.get_evidence()

else:
logger.info("NVIDIA GPU is not available")
client.add_verifier(
attestation.Devices.GPU,
attestation.Environment.LOCAL,
"",
"",
OCSP_URL,
RIM_URL,
)
logger.info(f"Using local verifier {client.get_verifiers()}")

evidence_list = client.get_evidence(options={"no_gpu_mode": True})
# Collect evidence and perform attestation.
options = {}
if not is_nvidia_gpu_available():
options["no_gpu_mode"] = True

evidence_list = client.get_evidence(options=options)
logger.info(f"Evidence list: {evidence_list}")

# Attestation result
attestation_result = client.attest(evidence_list)

logger.info(f"Attestation result: {attestation_result}")

# Retrieve the attestation token and return it wrapped in our model
token: str = client.get_token()

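
A hedged sketch of calling the refactored nv_attest. The module path and the nonce value are assumptions: the file name is not shown on this page, and the concrete Nonce type is defined in nilai_common rather than in this diff.

from nilai_attestation.attestation.attest import nv_attest  # module path assumed, not shown in this diff

# Illustrative nonce; the real format is dictated by nilai_common.Nonce.
nonce = "0123456789abcdef" * 4
token = nv_attest(nonce)  # collects evidence (no_gpu_mode when no GPU is present) and attests
print(token)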
@@ -5,10 +5,10 @@
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nilai_common.api_model import AttestationReport
from nv_attestation_sdk import attestation # type: ignore
import json
import base64
from nilai_common.logger import setup_logger
from nilai_attestation.attestation.nvtrust import get_client

logger = setup_logger(__name__)

@@ -43,9 +43,7 @@
}


def verify_attestation(
attestation_report: AttestationReport, name: str = "thisNode1"
) -> bool:
def verify_attestation(attestation_report: AttestationReport) -> bool:
"""Verify an NVIDIA attestation token against a policy.

Args:
@@ -58,12 +56,8 @@ def verify_attestation(attestation_report: AttestationReport) -> bool:

# Create an attestation client instance for token verification.
logger.info(f"Attestation report: {attestation_report}")
client = attestation.Attestation()
client.set_name(name)
client = get_client()
client.set_nonce(attestation_report.nonce)
client.add_verifier(
attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, ""
)

token = base64.b64decode(attestation_report.gpu_attestation).decode("utf-8")
logger.info(f"Token: {token}")
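
A corresponding sketch for verification. The AttestationReport fields beyond nonce and gpu_attestation are not visible here, so this assumes those two suffice; the module path is likewise an assumption.

import base64
from nilai_common.api_model import AttestationReport
from nilai_attestation.attestation.verify import verify_attestation  # path assumed

token_str = "<token returned by nv_attest>"  # placeholder for a real attestation token
report = AttestationReport(
    nonce="0123456789abcdef" * 4,  # the nonce that was set during attestation
    gpu_attestation=base64.b64encode(token_str.encode()).decode(),
)
print("verified:", verify_attestation(report))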
4 changes: 0 additions & 4 deletions packages/nilai-common/src/nilai_common/logger.py
@@ -33,7 +33,3 @@ def setup_logger(
logger.addHandler(file_handler)

return logger


# Default logger instance
default_logger = setup_logger("nilai")