diff --git a/Cargo.lock b/Cargo.lock index af4763cdfc0..00cb3840245 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6863,6 +6863,7 @@ name = "nexus-external-api" version = "0.1.0" dependencies = [ "anyhow", + "base64 0.22.1", "chrono", "dropshot", "dropshot-api-manager-types", diff --git a/common/src/address.rs b/common/src/address.rs index 94ff1aa9d43..786b438ee23 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -25,52 +25,70 @@ pub const SLED_PREFIX: u8 = 64; // Multicast constants -/// IPv4 Source-Specific Multicast (SSM) subnet as defined in RFC 4607: -/// . +/// IPv4 Source-Specific Multicast (SSM) subnet. /// -/// RFC 4607 Section 3 allocates 232.0.0.0/8 as the IPv4 SSM address range. +/// See [RFC 4607 §3] for the IPv4 SSM address range allocation (232.0.0.0/8). /// This is a single contiguous block, unlike IPv6 which has per-scope ranges. -pub const IPV4_SSM_SUBNET: oxnet::Ipv4Net = - oxnet::Ipv4Net::new_unchecked(Ipv4Addr::new(232, 0, 0, 0), 8); +/// +/// [RFC 4607 §3]: https://www.rfc-editor.org/rfc/rfc4607#section-3 +pub const IPV4_SSM_SUBNET: Ipv4Net = + Ipv4Net::new_unchecked(Ipv4Addr::new(232, 0, 0, 0), 8); -/// IPv6 Source-Specific Multicast (SSM) subnet as defined in RFC 4607: -/// . +/// IPv6 Source-Specific Multicast (SSM) subnet. /// -/// RFC 4607 Section 3 specifies "FF3x::/32 for each scope x" - meaning one -/// /32 block per scope (FF30::/32, FF31::/32, ..., FF3F::/32). +/// See [RFC 4607 §3] for SSM scope allocation. The RFC specifies "ff3x::/32 +/// for each scope x" - meaning one /32 block per scope (ff30::/32, ff31::/32, +/// ..., ff3f::/32). /// /// We use /12 as an implementation convenience to match all these blocks with /// a single subnet. 
This works because all SSM addresses share the same first /// 12 bits: -/// - Bits 0-7: 11111111 (0xFF, multicast prefix) +/// - Bits 0-7: 11111111 (0xff, multicast prefix) /// - Bits 8-11: 0011 (flag field = 3, indicating SSM) -/// - Bits 12-15: xxxx (scope field, any value 0-F) +/// - Bits 12-15: xxxx (scope field, any value 0-f) /// -/// Thus FF30::/12 efficiently matches FF30:: through FF3F:FFFF:...:FFFF, +/// Thus ff30::/12 efficiently matches ff30:: through ff3f:ffff:...:ffff, /// covering all SSM scopes. -pub const IPV6_SSM_SUBNET: oxnet::Ipv6Net = oxnet::Ipv6Net::new_unchecked( - Ipv6Addr::new(0xff30, 0, 0, 0, 0, 0, 0, 0), - 12, -); +/// +/// This superset is used only for contains-based classification and validation +/// (e.g., `contains()` checks). It is not an allocation boundary. +/// +/// [RFC 4607 §3]: https://www.rfc-editor.org/rfc/rfc4607#section-3 +pub const IPV6_SSM_SUBNET: Ipv6Net = + Ipv6Net::new_unchecked(Ipv6Addr::new(0xff30, 0, 0, 0, 0, 0, 0, 0), 12); + +/// Maximum source IPs per SSM group member (per [RFC 3376] IGMPv3). +/// +/// [RFC 3376]: https://www.rfc-editor.org/rfc/rfc3376 +pub const MAX_SSM_SOURCE_IPS: usize = 64; /// IPv4 multicast address range (224.0.0.0/4). -/// See RFC 5771 (IPv4 Multicast Address Assignments): -/// +/// +/// See [RFC 5771] for IPv4 multicast address assignments. +/// +/// [RFC 5771]: https://www.rfc-editor.org/rfc/rfc5771 pub const IPV4_MULTICAST_RANGE: Ipv4Net = Ipv4Net::new_unchecked(Ipv4Addr::new(224, 0, 0, 0), 4); /// IPv4 link-local multicast subnet (224.0.0.0/24). +/// /// This range is reserved for local network control protocols and should not /// be routed beyond the local link. Includes addresses for protocols like /// OSPF (224.0.0.5), RIPv2 (224.0.0.9), and other local routing protocols. -/// See RFC 5771 Section 4: -/// +/// +/// See [RFC 5771 §4] for link-local multicast address assignments. The IANA +/// IPv4 Multicast Address Space registry is the canonical source for +/// assignments. 
+/// +/// [RFC 5771 §4]: https://www.rfc-editor.org/rfc/rfc5771#section-4 pub const IPV4_LINK_LOCAL_MULTICAST_SUBNET: Ipv4Net = Ipv4Net::new_unchecked(Ipv4Addr::new(224, 0, 0, 0), 24); /// IPv6 multicast address range (ff00::/8). -/// See RFC 4291 (IPv6 Addressing Architecture): -/// +/// +/// See [RFC 4291] for IPv6 addressing architecture. +/// +/// [RFC 4291]: https://www.rfc-editor.org/rfc/rfc4291 pub const IPV6_MULTICAST_RANGE: Ipv6Net = Ipv6Net::new_unchecked(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0), 8); @@ -82,25 +100,98 @@ pub const IPV6_MULTICAST_PREFIX: u16 = 0xff00; pub const IPV6_ADMIN_SCOPED_MULTICAST_PREFIX: u16 = 0xff04; /// IPv6 interface-local multicast subnet (ff01::/16). +/// /// These addresses are not routable and should not be added to IP pools. -/// See RFC 4291 Section 2.7 (multicast scope field): -/// -pub const IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET: oxnet::Ipv6Net = - oxnet::Ipv6Net::new_unchecked( - Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 0), - 16, - ); +/// +/// See [RFC 4291 §2.7] for multicast scope field definitions. +/// +/// [RFC 4291 §2.7]: https://www.rfc-editor.org/rfc/rfc4291#section-2.7 +pub const IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET: Ipv6Net = + Ipv6Net::new_unchecked(Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 0), 16); + +/// Last address in the IPv6 interface-local multicast subnet. +pub const IPV6_INTERFACE_LOCAL_MULTICAST_LAST: Ipv6Addr = Ipv6Addr::new( + 0xff01, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, +); /// IPv6 link-local multicast subnet (ff02::/16). +/// /// These addresses are not routable beyond the local link and should not be /// added to IP pools. -/// See RFC 4291 Section 2.7 (multicast scope field): -/// -pub const IPV6_LINK_LOCAL_MULTICAST_SUBNET: oxnet::Ipv6Net = - oxnet::Ipv6Net::new_unchecked( - Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0), - 16, - ); +/// +/// See [RFC 4291 §2.7] for multicast scope field definitions. 
+/// +/// [RFC 4291 §2.7]: https://www.rfc-editor.org/rfc/rfc4291#section-2.7 +pub const IPV6_LINK_LOCAL_MULTICAST_SUBNET: Ipv6Net = + Ipv6Net::new_unchecked(Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0), 16); + +/// Last address in the IPv6 link-local multicast subnet. +pub const IPV6_LINK_LOCAL_MULTICAST_LAST: Ipv6Addr = Ipv6Addr::new( + 0xff02, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, +); + +/// IPv6 reserved-scope multicast subnet (ff00::/16). +/// +/// Scope 0 is reserved - packets with this scope must not be originated and +/// must be silently dropped if received. These addresses should not be added +/// to IP pools. +/// +/// See [RFC 4291 §2.7] for multicast scope field definitions. +/// +/// [RFC 4291 §2.7]: https://www.rfc-editor.org/rfc/rfc4291#section-2.7 +pub const IPV6_RESERVED_SCOPE_MULTICAST_SUBNET: Ipv6Net = + Ipv6Net::new_unchecked(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0), 16); + +/// Last address in the IPv6 reserved-scope multicast subnet. +pub const IPV6_RESERVED_SCOPE_MULTICAST_LAST: Ipv6Addr = Ipv6Addr::new( + 0xff00, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, +); + +/// IPv4 GLOP addressing block (233.0.0.0/8). +/// +/// This range is reserved for GLOP addressing and should not be allocated from +/// IP pools for general multicast use. +/// +/// See [RFC 3180] for GLOP address allocation. +/// +/// [RFC 3180]: https://www.rfc-editor.org/rfc/rfc3180 +pub const IPV4_GLOP_MULTICAST_SUBNET: Ipv4Net = + Ipv4Net::new_unchecked(Ipv4Addr::new(233, 0, 0, 0), 8); + +/// IPv4 administratively scoped multicast subnet (239.0.0.0/8). +/// +/// This range is reserved for organization-local administrative scoping and +/// should not be allocated from IP pools for general multicast use. +/// +/// See [RFC 2365] for administratively scoped IP multicast. 
+/// +/// [RFC 2365]: https://www.rfc-editor.org/rfc/rfc2365 +pub const IPV4_ADMIN_SCOPED_MULTICAST_SUBNET: Ipv4Net = + Ipv4Net::new_unchecked(Ipv4Addr::new(239, 0, 0, 0), 8); + +/// Specifically reserved IPv4 multicast addresses. +/// +/// These addresses are reserved for specific protocols and should not be +/// allocated from IP pools. They fall outside the link-local range +/// (224.0.0.0/24) but are still reserved. +/// +/// - 224.0.1.1: NTP (Network Time Protocol, RFC 5905) +/// - 224.0.1.39: Cisco Auto-RP-Announce +/// - 224.0.1.40: Cisco Auto-RP-Discovery +/// - 224.0.1.129-132: PTP (Precision Time Protocol, IEEE 1588) +/// +/// See [IANA IPv4 Multicast Address Space Registry] for complete assignments. +/// +/// [IANA IPv4 Multicast Address Space Registry]: https://www.iana.org/assignments/multicast-addresses/multicast-addresses.xhtml +pub const IPV4_SPECIFIC_RESERVED_MULTICAST_ADDRS: [Ipv4Addr; 7] = [ + Ipv4Addr::new(224, 0, 1, 1), // NTP + Ipv4Addr::new(224, 0, 1, 39), // Cisco Auto-RP-Announce + Ipv4Addr::new(224, 0, 1, 40), // Cisco Auto-RP-Discovery + Ipv4Addr::new(224, 0, 1, 129), // PTP-primary + Ipv4Addr::new(224, 0, 1, 130), // PTP-alternate1 + Ipv4Addr::new(224, 0, 1, 131), // PTP-alternate2 + Ipv4Addr::new(224, 0, 1, 132), // PTP-alternate3 +]; /// maximum possible value for a tcp or udp port pub const MAX_PORT: u16 = u16::MAX; @@ -254,8 +345,9 @@ pub static NTP_OPTE_IPV6_SUBNET: LazyLock = LazyLock::new(|| { // Anycast is a mechanism in which a single IP address is shared by multiple // devices, and the destination is located based on routing distance. // -// This is covered by RFC 4291 in much more detail: -// +// See [RFC 4291 §2.6] for anycast address allocation. +// +// [RFC 4291 §2.6]: https://www.rfc-editor.org/rfc/rfc4291#section-2.6 // // Anycast addresses are always the "zeroeth" address within a subnet. We // always explicitly skip these addresses within our network. 
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 2738d6ff1fb..cd38fea5bb3 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -713,7 +713,7 @@ task: "multicast_reconciler" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms -warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) +warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) task: "phantom_disks" configured period: every s @@ -1281,7 +1281,7 @@ task: "multicast_reconciler" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms -warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) +warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) task: "phantom_disks" configured period: every s diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs index 
93cb56a7d86..b2534810645 100644 --- a/illumos-utils/src/opte/port_manager.rs +++ b/illumos-utils/src/opte/port_manager.rs @@ -836,7 +836,7 @@ impl PortManager { /// /// TODO: Once OPTE kernel module supports multicast group APIs, this /// method should be updated to configure OPTE port-level multicast - /// group membership. Note: multicast groups are fleet-wide and can span + /// group membership. Note: multicast groups are fleet-scoped and can span /// across VPCs. pub fn multicast_groups_ensure( &self, diff --git a/nexus/auth/src/authz/api_resources.rs b/nexus/auth/src/authz/api_resources.rs index 7f7126b2813..e86423e0c65 100644 --- a/nexus/auth/src/authz/api_resources.rs +++ b/nexus/auth/src/authz/api_resources.rs @@ -12,7 +12,7 @@ //! accept these `authz` types. //! //! The `authz` types can be passed to -//! [`crate::context::OpContext::authorize()`] to do an authorization check -- +//! [`OpContext::authorize()`] to do an authorization check -- //! is the caller allowed to perform some action on the resource? This is the //! primary way of doing authz checks in Nexus. //! @@ -153,7 +153,7 @@ where /// Fleets. /// /// This object is used for authorization checks on a Fleet by passing it as the -/// `resource` argument to [`crate::context::OpContext::authorize()`]. You +/// `resource` argument to [`OpContext::authorize()`]. You /// don't construct a `Fleet` yourself -- use the global [`FLEET`]. #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub struct Fleet; @@ -475,18 +475,16 @@ impl AuthorizedResource for IpPoolList { /// collection. /// /// **Authorization Model:** -/// - Multicast groups are fleet-wide resources (similar to IP pools). -/// - Any authenticated user within a silo in the fleet can create, list, read, -/// and modify groups. This includes project collaborators, silo collaborators, -/// and silo admins. -/// - Cross-silo multicast communication is enabled by fleet-wide access. +/// - Multicast groups are fleet-scoped resources. 
+/// - Groups are created when the first instance joins and deleted when the last +/// member leaves (implicit lifecycle). +/// - **List**: Any authenticated user in the fleet (for discovery). /// /// The fleet-level collection endpoint (`/v1/multicast-groups`) allows: -/// - Any authenticated user within the fleet's silos to create and list groups. -/// - Instances from different projects and silos can join the same multicast groups. +/// - Fleet-wide listing for all authenticated users (discovery). +/// - Instances from different projects and silos can join the same groups. /// -/// See `omicron.polar` for the detailed policy rules that grant fleet-wide -/// access to authenticated silo users for multicast group operations. +/// See `omicron.polar` for the detailed policy rules. #[derive(Clone, Copy, Debug)] pub struct MulticastGroupList; @@ -1393,35 +1391,21 @@ authz_resource! { // MulticastGroup Authorization // -// MulticastGroups are **fleet-scoped resources** (parent = "Fleet"), similar to -// IP pools, to enable efficient cross-project and cross-silo multicast -// communication. +// MulticastGroups are **fleet-scoped resources** with an implicit lifecycle: +// created when the first instance joins and deleted when the last member leaves. // // Authorization rules: -// - Creating/modifying groups: Any authenticated user within a silo in the fleet. -// This includes project collaborators, silo collaborators, and silo admins. -// - Listing groups: Any authenticated user within a silo in the fleet -// - Viewing individual groups: Any authenticated user within a silo in the fleet -// - Attaching instances to groups: only requires Instance::Modify permission -// (users can attach their own instances to any fleet-scoped group) +// - List/Read: Any authenticated user in their fleet +// - Attach/detach: Instance::Modify permission on the instance being attached // -// Fleet::Admin role can also perform all operations via the parent Fleet relation. 
-// -// See omicron.polar for the special `has_permission` rules that grant create/modify/ -// list/read access to authenticated silo users (including project collaborators), -// enabling cross-project and cross-silo multicast communication without requiring -// Fleet::Admin or Fleet::Viewer roles. -// -// Member management: `MulticastGroup` member attachments/detachments (instances -// joining/leaving groups) use the existing `MulticastGroup` and `Instance` -// authz resources rather than creating a separate `MulticastGroupMember` authz -// resource. +// See omicron.polar for the custom authorization rules. + authz_resource! { name = "MulticastGroup", parent = "Fleet", primary_key = Uuid, roles_allowed = false, - polar_snippet = FleetChild, + polar_snippet = Custom, } // Customer network integration resources nested below "Fleet" diff --git a/nexus/auth/src/authz/omicron.polar b/nexus/auth/src/authz/omicron.polar index a536370b2f0..fa1720bcf85 100644 --- a/nexus/auth/src/authz/omicron.polar +++ b/nexus/auth/src/authz/omicron.polar @@ -493,15 +493,11 @@ has_permission(actor: AuthenticatedActor, "create_child", ip_pool: IpPool) if actor.is_user and silo in actor.silo and silo.fleet = ip_pool.fleet; # Describes the policy for accessing "/v1/multicast-groups" in the API +# Groups are created when the first instance joins and deleted when the last leaves. resource MulticastGroupList { - permissions = [ - "list_children", - "create_child", - ]; + permissions = [ "list_children" ]; relations = { parent_fleet: Fleet }; - # Fleet Administrators can create multicast groups - "create_child" if "admin" on "parent_fleet"; # Fleet Viewers can list multicast groups "list_children" if "viewer" on "parent_fleet"; @@ -509,29 +505,28 @@ resource MulticastGroupList { has_relation(fleet: Fleet, "parent_fleet", multicast_group_list: MulticastGroupList) if multicast_group_list.fleet = fleet; -# Any authenticated user can create multicast groups in their fleet. 
-# This is necessary to allow silo users to create multicast groups for -# cross-project and cross-silo communication without requiring Fleet::Admin. -has_permission(actor: AuthenticatedActor, "create_child", multicast_group_list: MulticastGroupList) - if silo in actor.silo and silo.fleet = multicast_group_list.fleet; - # Any authenticated user can list multicast groups in their fleet. -# This is necessary because multicast groups are fleet-scoped resources that -# silo users need to discover and attach their instances to, without requiring -# Fleet::Viewer role. +# This enables silo users to discover groups for attaching instances, +# without requiring the Fleet::Viewer role. has_permission(actor: AuthenticatedActor, "list_children", multicast_group_list: MulticastGroupList) - if silo in actor.silo and silo.fleet = multicast_group_list.fleet; + if actor.is_user and silo in actor.silo and silo.fleet = multicast_group_list.fleet; + +# MulticastGroup is a fleet-level discovery resource. +# Join/leave authorization is gated by Instance::Modify, not the group itself. +resource MulticastGroup { + permissions = [ "read", "list_children" ]; + relations = { parent_fleet: Fleet }; +} +has_relation(fleet: Fleet, "parent_fleet", multicast_group: MulticastGroup) + if multicast_group.fleet = fleet; -# Any authenticated user can read and modify individual multicast groups in their fleet. -# Users can create, modify, and consume (attach instances to) multicast groups. -# This enables cross-project and cross-silo multicast while maintaining -# appropriate security boundaries via API authorization and underlay group -# membership validation. 
+# Any authenticated user can read multicast groups in their fleet has_permission(actor: AuthenticatedActor, "read", multicast_group: MulticastGroup) - if silo in actor.silo and silo.fleet = multicast_group.fleet; + if actor.is_user and silo in actor.silo and silo.fleet = multicast_group.fleet; -has_permission(actor: AuthenticatedActor, "modify", multicast_group: MulticastGroup) - if silo in actor.silo and silo.fleet = multicast_group.fleet; +# Any authenticated user can list members of multicast groups in their fleet +has_permission(actor: AuthenticatedActor, "list_children", multicast_group: MulticastGroup) + if actor.is_user and silo in actor.silo and silo.fleet = multicast_group.fleet; # Describes the policy for reading and writing the audit log resource AuditLog { diff --git a/nexus/db-model/src/multicast_group.rs b/nexus/db-model/src/multicast_group.rs index 06ab9f27350..6c9deea3de4 100644 --- a/nexus/db-model/src/multicast_group.rs +++ b/nexus/db-model/src/multicast_group.rs @@ -16,7 +16,7 @@ //! - Support Source-Specific Multicast (SSM) with configurable source IPs //! - Follow the Resource trait pattern for user-facing identity management //! - **Fleet-scoped** (not project-scoped) to enable cross-project multicast -//! - All use `DEFAULT_MULTICAST_VNI` (77) for consistent fleet-wide behavior +//! - All use `DEFAULT_MULTICAST_VNI` (77) for consistent fleet-scoped behavior //! //! ### VNI and Security Model //! @@ -25,7 +25,7 @@ //! traffic where each VPC receives its own VNI for tenant isolation. //! //! The shared VNI design reflects multicast's fleet-scoped authorization model: -//! groups are fleet resources (like IP pools) that can span projects and silos. +//! groups are fleet-scoped resources that can span projects and silos. //! Forwarding occurs through Dendrite's bifurcated NAT architecture, which //! translates external multicast addresses to underlay IPv6 groups at the switch. //! @@ -36,8 +36,9 @@ //! 
multicast VNIs if VPC-isolated multicast groups become necessary. //! //! Security happens at two layers: -//! - **Control plane**: Fleet admins create groups; users attach instances via API -//! - **Dataplane**: Switch hardware validates underlay group membership +//! - **Control plane**: groups created implicitly via member-add; pool linking +//! controls access +//! - **Dataplane**: switch dataplane validates underlay group membership //! //! This allows cross-project and cross-silo multicast while maintaining explicit //! membership control through underlay forwarding tables. @@ -45,12 +46,13 @@ //! ## Underlay Multicast Groups //! //! System-generated admin-scoped IPv6 multicast groups for internal forwarding: -//! - Use IPv6 admin-local multicast scope (ff04::/16) per RFC 7346 -//! +//! - Use IPv6 admin-local multicast scope (ff04::/16) per [RFC 7346] //! - Paired 1:1 with external groups for NAT-based forwarding //! - Handle rack-internal multicast traffic between switches //! - Use individual field pattern for system resources //! +//! [RFC 7346]: https://www.rfc-editor.org/rfc/rfc7346 +//! //! ## Member Lifecycle (handled by RPW) //! //! Multicast group members follow a 3-state lifecycle managed by the @@ -76,9 +78,7 @@ use std::net::IpAddr; use chrono::{DateTime, Utc}; -use diesel::{ - AsChangeset, AsExpression, FromSqlRow, Insertable, Queryable, Selectable, -}; +use diesel::{AsExpression, FromSqlRow, Insertable, Queryable, Selectable}; use ipnetwork::IpNetwork; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -199,6 +199,8 @@ pub struct ExternalMulticastGroup { /// efficiency, unlike other VLAN columns in the schema which use `SqlU16` /// (forcing INT4). Direct `i16` is appropriate here since VLANs fit in /// INT2's range. + /// + /// TODO(multicast): Remove mvlan field - being deprecated from multicast groups pub mvlan: Option, /// Associated underlay group for NAT. 
/// Initially None in ["Creating"](MulticastGroupState::Creating) state, @@ -223,6 +225,18 @@ pub struct ExternalMulticastGroup { pub version_removed: Option, } +impl ExternalMulticastGroup { + /// DPD tag for switch configuration. + /// + /// Uses the group's UUID to ensure uniqueness across the group's lifecycle. + /// This prevents tag collision when a group name is reused after deletion + /// (important given implicit create/delete semantics). Both external and + /// underlay groups use the same tag for pairing. + pub fn dpd_tag(&self) -> String { + self.id().to_string() + } +} + /// Values used to create a [MulticastGroupMember] in the database. /// /// This struct is used for database insertions and omits fields that are @@ -237,6 +251,7 @@ pub struct MulticastGroupMemberValues { pub time_modified: DateTime, pub time_deleted: Option>, pub external_group_id: Uuid, + pub multicast_ip: IpNetwork, pub parent_id: Uuid, pub sled_id: Option>, pub state: MulticastGroupMemberState, @@ -268,6 +283,8 @@ pub struct MulticastGroupMember { pub time_deleted: Option>, /// External multicast group this member belongs to. pub external_group_id: Uuid, + /// The multicast IP address of the group this member belongs to. + pub multicast_ip: IpNetwork, /// Parent instance or service that receives multicast traffic. pub parent_id: Uuid, /// Sled hosting the parent. 
@@ -329,6 +346,7 @@ impl TryFrom for views::MulticastGroupMember { time_modified: member.time_modified, }, multicast_group_id: member.external_group_id, + multicast_ip: member.multicast_ip.ip(), instance_id: member.parent_id, state: member.state.to_string(), }) @@ -393,6 +411,7 @@ impl MulticastGroupMember { pub fn new( id: Uuid, external_group_id: Uuid, + multicast_ip: IpNetwork, parent_id: Uuid, sled_id: Option>, ) -> Self { @@ -402,6 +421,7 @@ impl MulticastGroupMember { time_modified: Utc::now(), time_deleted: None, external_group_id, + multicast_ip, parent_id, sled_id, state: MulticastGroupMemberState::Joining, @@ -456,35 +476,3 @@ pub struct UnderlayMulticastGroup { /// Version when this group was removed. pub version_removed: Option, } - -/// Update data for a multicast group. -#[derive(AsChangeset, Debug, PartialEq, Eq)] -#[diesel(table_name = multicast_group)] -pub struct ExternalMulticastGroupUpdate { - pub name: Option, - pub description: Option, - pub source_ips: Option>, - // Needs to be double Option so we can set a value of null in the DB by - // passing Some(None). None by itself is ignored by Diesel. - pub mvlan: Option>, - pub time_modified: DateTime, -} - -impl From - for ExternalMulticastGroupUpdate -{ - fn from( - params: nexus_types::external_api::params::MulticastGroupUpdate, - ) -> Self { - Self { - name: params.identity.name.map(Name), - description: params.identity.description, - source_ips: params - .source_ips - .map(|ips| ips.into_iter().map(IpNetwork::from).collect()), - // mvlan is always None here - handled manually in datastore - mvlan: None, - time_modified: Utc::now(), - } - } -} diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 6b91f91804d..ad43b14b5f9 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. 
Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(212, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(213, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(213, "multicast-member-ip-and-indexes"), KnownVersion::new(212, "local-storage-disk-type"), KnownVersion::new(211, "blueprint-sled-config-subnet"), KnownVersion::new(210, "one-big-ereport-table"), diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index b2849277603..02a9ea3af7a 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -310,6 +310,206 @@ impl DataStore { }) } + /// Look up any IP pool by pool type linked to the caller's silo. + /// + /// Prefers the default pool if one exists. If no default exists, falls + /// back to any linked pool, selecting alphabetically by name (arbitrary + /// tie-breaker, not semantically meaningful). Returns an error if no pool + /// of the given type is linked to the caller's silo. + /// + /// Note: For multicast pools, this method does not distinguish between + /// ASM (224/4) and SSM (232/8) pools. + /// - For SSM multicast groups (with `source_ips`), use + /// [`Self::ip_pools_fetch_ssm_multicast`] to ensure an SSM-range pool is + /// selected. + /// - For ASM multicast groups (no `source_ips`), use + /// [`Self::ip_pools_fetch_asm_multicast`] to ensure an ASM-range pool is + /// selected when no explicit pool or IP is provided. 
+ async fn ip_pools_fetch_any_by_type( + &self, + opctx: &OpContext, + pool_type: IpPoolType, + ) -> LookupResult<(authz::IpPool, IpPool)> { + use nexus_db_schema::schema::ip_pool; + use nexus_db_schema::schema::ip_pool_resource; + + let authz_silo_id = opctx.authn.silo_required()?.id(); + let lookup_type = LookupType::ByOther(format!( + "{pool_type} IP pool for current silo" + )); + + // Find any pool of the given type linked to the caller's silo, + // preferring default pools + ip_pool::table + .inner_join(ip_pool_resource::table) + .filter( + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), + ) + .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) + .filter(ip_pool::time_deleted.is_null()) + .filter(ip_pool::pool_type.eq(pool_type)) + .order((ip_pool_resource::is_default.desc(), ip_pool::name.asc())) + .select(IpPool::as_select()) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| { + public_error_from_diesel_lookup( + e, + ResourceType::IpPool, + &lookup_type, + ) + }) + .map(|ip_pool| { + let authz_pool = + authz::IpPool::new(authz::FLEET, ip_pool.id(), lookup_type); + (authz_pool, ip_pool) + }) + } + + /// Look up an SSM multicast pool linked to the caller's silo. + /// + /// Per [RFC 4607], Source-Specific Multicast (SSM) addresses (IPv4 232/8, + /// IPv6 ff3x::/32) are used when source IPs are specified. This method + /// finds pools with ranges in these SSM address spaces, which is required + /// when creating a multicast group with `source_ips` but without an + /// explicit pool or IP address. + /// + /// Prefers the default pool if one exists. If no default exists, falls + /// back to any linked SSM pool, selecting alphabetically by name (arbitrary + /// tie-breaker). 
+ /// + /// [RFC 4607]: https://datatracker.ietf.org/doc/html/rfc4607 + pub async fn ip_pools_fetch_ssm_multicast( + &self, + opctx: &OpContext, + ) -> LookupResult<(authz::IpPool, IpPool)> { + use nexus_db_schema::schema::ip_pool; + use nexus_db_schema::schema::ip_pool_range; + use nexus_db_schema::schema::ip_pool_resource; + + let authz_silo_id = opctx.authn.silo_required()?.id(); + let lookup_type = + LookupType::ByOther("SSM multicast pool for current silo".into()); + + // We need to find multicast pools with SSM ranges. + // SSM ranges are: IPv4 232.0.0.0/8, IPv6 ff3x::/32 + let pools: Vec<(IpPool, IpPoolRange, bool)> = ip_pool::table + .inner_join(ip_pool_resource::table) + .inner_join( + ip_pool_range::table + .on(ip_pool_range::ip_pool_id.eq(ip_pool::id)), + ) + .filter( + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), + ) + .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) + .filter(ip_pool::time_deleted.is_null()) + .filter(ip_pool_range::time_deleted.is_null()) + .filter(ip_pool::pool_type.eq(IpPoolType::Multicast)) + .order((ip_pool_resource::is_default.desc(), ip_pool::name.asc())) + .select(( + IpPool::as_select(), + IpPoolRange::as_select(), + ip_pool_resource::is_default, + )) + .load_async::<(IpPool, IpPoolRange, bool)>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + // Find the first pool with an SSM range (already ordered by preference) + for (pool, range, _is_default) in pools { + let is_ssm = match range.first_address { + IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), + IpNetwork::V6(net) => IPV6_SSM_SUBNET.contains(net.network()), + }; + + if is_ssm { + let authz_pool = + authz::IpPool::new(authz::FLEET, pool.id(), lookup_type); + return Ok((authz_pool, pool)); + } + } + + // No SSM pool found + Err(public_error_from_diesel_lookup( + DieselError::NotFound, + ResourceType::IpPool, + &lookup_type, + )) + } + + /// Look 
up an ASM multicast pool linked to the caller's silo. + /// + /// ASM (Any-Source Multicast) addresses are multicast ranges that are not + /// in the SSM spaces (IPv4 232/8, IPv6 ff3x::/32). This method finds pools + /// with ranges outside those SSM address spaces. Use this when creating a + /// multicast group without `source_ips` and without an explicit pool or IP + /// address, to ensure an ASM-range pool is selected. + /// + /// Prefers the default pool if one exists. If no default exists, falls + /// back to any linked ASM pool, selecting alphabetically by name + /// (arbitrary tie-breaker). + pub async fn ip_pools_fetch_asm_multicast( + &self, + opctx: &OpContext, + ) -> LookupResult<(authz::IpPool, IpPool)> { + use nexus_db_schema::schema::ip_pool; + use nexus_db_schema::schema::ip_pool_range; + use nexus_db_schema::schema::ip_pool_resource; + + let authz_silo_id = opctx.authn.silo_required()?.id(); + let lookup_type = + LookupType::ByOther("ASM multicast pool for current silo".into()); + + let pools: Vec<(IpPool, IpPoolRange, bool)> = ip_pool::table + .inner_join(ip_pool_resource::table) + .inner_join( + ip_pool_range::table + .on(ip_pool_range::ip_pool_id.eq(ip_pool::id)), + ) + .filter( + ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo), + ) + .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) + .filter(ip_pool::time_deleted.is_null()) + .filter(ip_pool_range::time_deleted.is_null()) + .filter(ip_pool::pool_type.eq(IpPoolType::Multicast)) + .order((ip_pool_resource::is_default.desc(), ip_pool::name.asc())) + .select(( + IpPool::as_select(), + IpPoolRange::as_select(), + ip_pool_resource::is_default, + )) + .load_async::<(IpPool, IpPoolRange, bool)>( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + for (pool, range, _is_default) in pools { + let is_ssm = match range.first_address { + IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), + 
IpNetwork::V6(net) => IPV6_SSM_SUBNET.contains(net.network()), + }; + + if !is_ssm { + let authz_pool = + authz::IpPool::new(authz::FLEET, pool.id(), lookup_type); + return Ok((authz_pool, pool)); + } + } + + Err(public_error_from_diesel_lookup( + DieselError::NotFound, + ResourceType::IpPool, + &lookup_type, + )) + } + /// Look up internal service IP Pools for both IP versions. /// /// This is useful when you need to handle resources like external IPs where @@ -346,7 +546,7 @@ impl DataStore { /// Pool resolution for allocation by pool type. /// - /// If pool is provided, validate it's linked to this silo and is of the + /// If a pool is provided, validate it's linked to this silo and is of the /// correct type. If no pool is provided, fetch the default pool of the /// specified type for this silo. Once the pool is resolved (by either /// method) do an auth check. Then return the pool. @@ -379,20 +579,30 @@ impl DataStore { // Verify it's the correct pool type if pool_record.pool_type != pool_type { return Err(Error::invalid_request(&format!( - "Pool '{}' is not a {} pool (type: {})", - pool_record.identity.name, - pool_type, - pool_record.pool_type + "Pool '{}' is not a {pool_type} pool (type: {})", + pool_record.identity.name, pool_record.pool_type ))); } authz_pool } - // If no pool specified, use the default pool of the specified type + // If no pool specified, find a pool of the specified type. + // + // For multicast pools, use `ip_pools_fetch_any_by_type` which + // prefers the default but falls back to any linked pool (selecting + // alphabetically by name if multiple exist). + // For unicast pools, require the default pool (existing behavior). None => { - let (authz_pool, ..) = self - .ip_pools_fetch_default_by_type(opctx, pool_type) - .await?; + let (authz_pool, ..) = match pool_type { + IpPoolType::Multicast => { + self.ip_pools_fetch_any_by_type(opctx, pool_type) + .await? 
+                }
+                IpPoolType::Unicast => {
+                    self.ip_pools_fetch_default_by_type(opctx, pool_type)
+                        .await?
+                }
+            };
             authz_pool
         }
     };
@@ -891,19 +1101,42 @@ impl DataStore {
             }
         })?;
 
+        // Only link default gateway for unicast pools (not multicast pools).
+        // Internet gateways are used for unicast traffic routing, not multicast.
         if ip_pool_resource.is_default {
-            self.link_default_gateway(
-                opctx,
-                ip_pool_resource.resource_id,
-                ip_pool_resource.ip_pool_id,
-                &conn,
-            )
-            .await?;
+            use nexus_db_schema::schema::ip_pool::dsl;
+
+            let pool_type: IpPoolType = dsl::ip_pool
+                .filter(dsl::id.eq(ip_pool_resource.ip_pool_id))
+                .filter(dsl::time_deleted.is_null())
+                .select(dsl::pool_type)
+                .first_async(&*conn)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                })?;
+
+            if pool_type == IpPoolType::Unicast {
+                self.link_default_gateway(
+                    opctx,
+                    ip_pool_resource.resource_id,
+                    ip_pool_resource.ip_pool_id,
+                    &conn,
+                )
+                .await?;
+            }
         }
 
         Ok(result)
     }
 
+    // Links the default internet gateway for all VPCs in a silo to the given
+    // IP pool.
+    //
+    // This is only applicable to unicast pools, whereas multicast
+    // traffic uses DPD/switch-level forwarding rather than internet gateway
+    // routing.
+    //
     // TODO-correctness: This seems like it should be in a transaction. At
     // least, the nested-loops can mostly be re-expressed as a join between the
     // silos, projects, vpcs, and Internet gateway tables.
@@ -1068,6 +1301,12 @@ impl DataStore {
         Ok(())
     }
 
+    // TODO: The current model only allows one default pool per silo, but we
+    // need up to 4 default pools: IPv4/unicast, IPv4/multicast, IPv6/unicast,
+    // IPv6/multicast. Without this, users would have to switch the default
+    // pool back and forth for allocation to work between pool types.
+    // The schema should support a default pool per (silo, ip_version, pool_type)
+    // tuple.
pub async fn ip_pool_set_default( &self, opctx: &OpContext, @@ -1118,8 +1357,8 @@ impl DataStore { .transaction(&conn, |conn| { let err = err.clone(); async move { - // note this is matching the specified silo, but could be any pool - let existing_default_for_silo = dsl::ip_pool_resource + // Find existing default for this silo. + let existing_default = dsl::ip_pool_resource .filter(dsl::resource_type.eq(IpPoolResourceType::Silo)) .filter(dsl::resource_id.eq(silo_id)) .filter(dsl::is_default.eq(true)) @@ -1127,9 +1366,9 @@ impl DataStore { .get_result_async(&conn) .await; - // if there is an existing default, we need to unset it before we can - // set the new default - if let Ok(existing_default) = existing_default_for_silo { + // if there is an existing default, we need to unset it + // before we can set the new default + if let Ok(existing_default) = existing_default { // if the pool we're making default is already default for this // silo, don't error: just noop if existing_default.ip_pool_id == ip_pool_id { @@ -1404,6 +1643,7 @@ impl DataStore { ) -> DeleteResult { use nexus_db_schema::schema::external_ip; use nexus_db_schema::schema::ip_pool_range::dsl; + use nexus_db_schema::schema::multicast_group; opctx.authorize(authz::Action::Modify, authz_pool).await?; let pool_id = authz_pool.id(); @@ -1454,6 +1694,23 @@ impl DataStore { )); } + // Find multicast groups allocated out of this pool and range. 
+        let has_multicast_groups = diesel::dsl::select(diesel::dsl::exists(
+            multicast_group::table
+                .filter(multicast_group::dsl::ip_pool_id.eq(pool_id))
+                .filter(multicast_group::dsl::ip_pool_range_id.eq(range_id))
+                .filter(multicast_group::dsl::time_deleted.is_null()),
+        ))
+        .get_result_async::<bool>(&*conn)
+        .await
+        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+        if has_multicast_groups {
+            return Err(Error::invalid_request(
+                "IP pool ranges cannot be deleted while \
+                multicast groups are allocated from them",
+            ));
+        }
+
         // Delete the range, conditional on the rcgen not having changed. This
         // protects the delete from occuring if clients allocated a new external
         // IP address in between the above check for children and this query.
@@ -1482,6 +1739,14 @@
     /// with existing ranges in the pool, i.e., that we don't mix ASM and SSM
     /// ranges in the same pool.
     ///
+    /// This validation is at the DB layer (rather than the app layer) because
+    /// it requires checking existing ranges within a transaction to prevent
+    /// race conditions.
+    ///
+    /// Note: Reserved range validation (link-local, GLOP, admin-scoped, etc.)
+    /// is done at the app layer in `validate_multicast_range` since those
+    /// checks are stateless.
+    ///
     /// Takes in a connection so it can be called from within a
     /// transaction context.
     async fn validate_multicast_pool_range_consistency_on_conn(
         &self,
@@ -1571,6 +1836,60 @@
         Ok(is_ssm)
     }
+
+    /// Find a multicast pool containing the given IP that is linked to the
+    /// caller's silo.
+    ///
+    /// Returns `Ok(None)` if no multicast pool contains the IP or if the pool
+    /// is not linked to the caller's silo. Pool ranges are globally unique, so
+    /// at most one pool can contain any IP.
+    ///
+    /// Note: This is only called for new group creation. For existing groups,
+    /// the lookup path skips pool verification since groups are fleet-scoped.
+    pub async fn ip_pool_containing_multicast_ip(
+        &self,
+        opctx: &OpContext,
+        ip: std::net::IpAddr,
+    ) -> LookupResult<Option<IpPool>> {
+        use nexus_db_schema::schema::ip_pool;
+        use nexus_db_schema::schema::ip_pool_range;
+        use nexus_db_schema::schema::ip_pool_resource;
+
+        let authz_silo_id = opctx.authn.silo_required()?.id();
+
+        // Convert the single IP to an IpNetwork for comparison
+        let ip_net = IpNetwork::from(ip);
+
+        ip_pool::table
+            .inner_join(
+                ip_pool_range::table
+                    .on(ip_pool_range::ip_pool_id.eq(ip_pool::id)),
+            )
+            .inner_join(
+                ip_pool_resource::table
+                    .on(ip_pool_resource::ip_pool_id.eq(ip_pool::id)),
+            )
+            // Pool must be multicast type
+            .filter(ip_pool::pool_type.eq(IpPoolType::Multicast))
+            // Pool and range must not be deleted
+            .filter(ip_pool::time_deleted.is_null())
+            .filter(ip_pool_range::time_deleted.is_null())
+            // Pool must be linked to caller's silo
+            .filter(
+                ip_pool_resource::resource_type.eq(IpPoolResourceType::Silo),
+            )
+            .filter(ip_pool_resource::resource_id.eq(authz_silo_id))
+            // IP must be within the range
+            .filter(ip_pool_range::first_address.le(ip_net))
+            .filter(ip_pool_range::last_address.ge(ip_net))
+            .select(IpPool::as_select())
+            .first_async::<IpPool>(
+                &*self.pool_connection_authorized(opctx).await?,
+            )
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
 }
 
 // Sentinel we try to cast as a UUID in the database, when linking an IP Pool to
@@ -1959,7 +2278,7 @@ fn reserve_internal_ip_pool_query(
 #[cfg(test)]
 mod test {
-    use std::net::{Ipv4Addr, Ipv6Addr};
+    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
     use std::num::NonZeroU32;
 
     use crate::authz;
@@ -2617,7 +2936,7 @@
                 ),
             )
             .await
-            .expect("Failed to create multicast IP pool");
+            .expect("Should create multicast IP pool");
 
         let authz_silo = opctx.authn.silo_required().unwrap();
         let link = IpPoolResource {
@@ -2691,7 +3010,7 @@
                 ),
             )
            .await
-            .expect("Failed to create multicast IP pool");
+            .expect("Should create multicast 
IP pool"); let link = IpPoolResource { ip_pool_id: pool.id(), @@ -2702,7 +3021,7 @@ mod test { datastore .ip_pool_link_silo(&opctx, link) .await - .expect("Could not link multicast pool to silo"); + .expect("Should link multicast pool to silo"); // Now should find the default multicast pool let default_pool = datastore @@ -2720,6 +3039,153 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_multicast_ip_pool_fetch_any_by_type() { + let logctx = + dev::test_setup_log("test_multicast_ip_pool_fetch_any_by_type"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Initially no multicast pool at all + let error = datastore + .ip_pools_fetch_any_by_type(&opctx, IpPoolType::Multicast) + .await + .unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. }); + + // Create and link a multicast pool with `is_default=false` + let identity = IdentityMetadataCreateParams { + name: "non-default-multicast-pool".parse().unwrap(), + description: "Non-default multicast pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create multicast IP pool"); + + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Should link multicast pool to silo"); + + // fetch_default_by_type should fail (no default) + let error = datastore + .ip_pools_fetch_default_by_type(&opctx, IpPoolType::Multicast) + .await + .unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // But fetch_any_by_type should succeed + let found_pool = datastore + .ip_pools_fetch_any_by_type(&opctx, IpPoolType::Multicast) + .await + .expect("Should find non-default multicast pool"); + assert_eq!(found_pool.1.id(), pool.id()); + assert_eq!(found_pool.1.pool_type, IpPoolType::Multicast); + + // Create another pool and mark it as default + let default_identity = IdentityMetadataCreateParams { + name: "default-multicast-pool".parse().unwrap(), + description: "Default multicast pool".to_string(), + }; + let default_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &default_identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create default multicast IP pool"); + + let default_link = IpPoolResource { + ip_pool_id: default_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: true, // This one is the default + }; + datastore + .ip_pool_link_silo(&opctx, default_link) + .await + .expect("Could not link default multicast pool to silo"); + + // Now fetch_any_by_type should prefer the default pool + let found_pool = datastore + .ip_pools_fetch_any_by_type(&opctx, IpPoolType::Multicast) + .await + .expect("Should find default multicast pool"); + assert_eq!(found_pool.1.id(), default_pool.id()); + assert_eq!( + found_pool.1.identity.name.to_string(), + "default-multicast-pool" + ); + + // Case: Silo-scoped multicast pool lookup + // Verify that multicast pools require linking: a user from a silo + // that is not linked to the pool should not find it. + // Pool linking is the mechanism of access control for multicast. 
+ + // Create an opctx for a completely different silo (random UUID) + let other_silo_id = uuid::Uuid::new_v4(); + let other_silo_user_id = omicron_uuid_kinds::SiloUserUuid::new_v4(); + let other_silo_authn = crate::authn::Context::for_test_user( + other_silo_user_id, + other_silo_id, + // Use same authn policy - the key thing is the silo_id differs + crate::authn::SiloAuthnPolicy::try_from( + &*nexus_db_fixed_data::silo::DEFAULT_SILO, + ) + .unwrap(), + ); + let other_silo_opctx = opctx.child_with_authn(other_silo_authn); + + // This user is from a silo that has no pool links, so they should + // not find the multicast pool (pool linking controls access) + let multicast_error = datastore + .ip_pools_fetch_any_by_type( + &other_silo_opctx, + IpPoolType::Multicast, + ) + .await + .unwrap_err(); + assert_matches!( + multicast_error, + Error::ObjectNotFound { .. }, + "Multicast pools require linking - unlinked silo should not find pool" + ); + + // Same for unicast - pools are silo-scoped + let unicast_error = datastore + .ip_pools_fetch_any_by_type(&other_silo_opctx, IpPoolType::Unicast) + .await + .unwrap_err(); + assert_matches!( + unicast_error, + Error::ObjectNotFound { .. 
}, + "Unicast pools also require linking" + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_multicast_ip_pool_ranges() { let logctx = dev::test_setup_log("test_multicast_ip_pool_ranges"); @@ -2741,7 +3207,7 @@ mod test { ), ) .await - .expect("Failed to create IPv4 multicast IP pool"); + .expect("Should create IPv4 multicast IP pool"); let authz_ipv4_pool = authz::IpPool::new( authz::FLEET, @@ -2782,7 +3248,7 @@ mod test { ), ) .await - .expect("Failed to create IPv6 multicast IP pool"); + .expect("Should create IPv6 multicast IP pool"); let authz_ipv6_pool = authz::IpPool::new( authz::FLEET, @@ -2790,11 +3256,12 @@ mod test { LookupType::ById(ipv6_pool.id()), ); - // Add IPv6 multicast range (ff00::/8) + // Add IPv6 multicast range - use site-local scope (ff05::/16) + // Note: ff00::/16, ff01::/16, ff02::/16 are reserved let ipv6_range = IpRange::V6( Ipv6Range::new( - Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 1), - Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 10), + Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 1), + Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 10), ) .unwrap(), ); @@ -2826,6 +3293,269 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_ip_pools_fetch_ssm_multicast() { + let logctx = dev::test_setup_log("test_ip_pools_fetch_ssm_multicast"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Initially no SSM pool - should fail + let error = + datastore.ip_pools_fetch_ssm_multicast(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // Create ASM pool with name that comes first alphabetically + // ASM uses 224.x.x.x range + let asm_identity = IdentityMetadataCreateParams { + name: "aaa-asm-multicast-pool".parse().unwrap(), + description: "ASM multicast pool".to_string(), + }; + let asm_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &asm_identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create ASM multicast IP pool"); + + let authz_asm_pool = authz::IpPool::new( + authz::FLEET, + asm_pool.id(), + LookupType::ById(asm_pool.id()), + ); + + // Add ASM range (224.x.x.x) + let asm_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 1, 1, 1), + Ipv4Addr::new(224, 1, 1, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range(&opctx, &authz_asm_pool, &asm_pool, &asm_range) + .await + .expect("Could not add ASM multicast range"); + + // Link ASM pool to silo + let asm_link = IpPoolResource { + ip_pool_id: asm_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, asm_link) + .await + .expect("Should link ASM multicast pool to silo"); + + // Even with ASM pool linked, fetch_ssm should still fail (no SSM pool) + let error = + datastore.ip_pools_fetch_ssm_multicast(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // Create SSM pool with name that comes after alphabetical ordering + // SSM uses 232.x.x.x range + let ssm_identity = IdentityMetadataCreateParams { + name: "zzz-ssm-multicast-pool".parse().unwrap(), + description: "SSM multicast pool".to_string(), + }; + let ssm_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &ssm_identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create SSM multicast IP pool"); + + let authz_ssm_pool = authz::IpPool::new( + authz::FLEET, + ssm_pool.id(), + LookupType::ById(ssm_pool.id()), + ); + + // Add SSM range (232.x.x.x) + let ssm_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(232, 1, 1, 1), + Ipv4Addr::new(232, 1, 1, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range(&opctx, &authz_ssm_pool, &ssm_pool, &ssm_range) + .await + .expect("Could not add SSM multicast range"); + + // Link SSM pool to silo + let ssm_link = IpPoolResource { + ip_pool_id: ssm_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, ssm_link) + .await + .expect("Should link SSM multicast pool to silo"); + + // `ip_pools_fetch_ssm_multicast`` should succeed and return the SSM pool + let (_, found_pool) = datastore + .ip_pools_fetch_ssm_multicast(&opctx) + .await + .expect("Should find SSM multicast pool"); + assert_eq!(found_pool.id(), ssm_pool.id()); + assert_eq!( + found_pool.identity.name.to_string(), + "zzz-ssm-multicast-pool" + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Verify ASM pool selection selects pools whose ranges are not SSM + /// (e.g., IPv4 224/4 but not 232/8), and does not get confused by + /// alphabetical ordering or the presence of SSM pools. 
+ #[tokio::test] + async fn test_ip_pools_fetch_asm_multicast() { + let logctx = dev::test_setup_log("test_ip_pools_fetch_asm_multicast"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Initially no ASM pool - should fail + let error = + datastore.ip_pools_fetch_asm_multicast(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. }); + + // Create SSM pool first with a name that would sort first if chosen + let ssm_identity = IdentityMetadataCreateParams { + name: "a-ssm-multicast-pool".parse().unwrap(), + description: "SSM multicast pool".to_string(), + }; + let ssm_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &ssm_identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create SSM multicast IP pool"); + + let authz_ssm_pool = authz::IpPool::new( + authz::FLEET, + ssm_pool.id(), + LookupType::ById(ssm_pool.id()), + ); + + // Add SSM range (232.x.x.x) + let ssm_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(232, 2, 2, 2), + Ipv4Addr::new(232, 2, 2, 20), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range(&opctx, &authz_ssm_pool, &ssm_pool, &ssm_range) + .await + .expect("Could not add SSM multicast range"); + + // Link SSM pool to silo + let ssm_link = IpPoolResource { + ip_pool_id: ssm_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, ssm_link) + .await + .expect("Should link SSM pool to silo"); + + // With only SSM pool linked, ASM lookup should still fail + let error = + datastore.ip_pools_fetch_asm_multicast(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // Create ASM pool and link + let asm_identity = IdentityMetadataCreateParams { + name: "zzz-asm-multicast-pool".parse().unwrap(), + description: "ASM multicast pool".to_string(), + }; + let asm_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &asm_identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create ASM multicast IP pool"); + + let authz_asm_pool = authz::IpPool::new( + authz::FLEET, + asm_pool.id(), + LookupType::ById(asm_pool.id()), + ); + + // Add ASM range (224.x.x.x) + let asm_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 3, 3, 3), + Ipv4Addr::new(224, 3, 3, 15), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range(&opctx, &authz_asm_pool, &asm_pool, &asm_range) + .await + .expect("Could not add ASM multicast range"); + + // Link ASM pool to silo + let asm_link = IpPoolResource { + ip_pool_id: asm_pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, asm_link) + .await + .expect("Should link ASM pool to silo"); + + // ASM fetcher should now return the ASM pool + let (_, found_pool) = datastore + .ip_pools_fetch_asm_multicast(&opctx) + .await + .expect("Should find ASM multicast pool"); + assert_eq!(found_pool.id(), asm_pool.id()); + assert_eq!( + found_pool.identity.name.to_string(), + "zzz-asm-multicast-pool" + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn paginate_ip_pools_by_delegation_type() { let logctx = @@ -3569,4 +4299,106 @@ mod test { ) .await; } + + #[tokio::test] + async fn test_ip_pool_containing_multicast_ip() { + let logctx = + dev::test_setup_log("test_ip_pool_containing_multicast_ip"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Create a multicast pool with 
an IPv4 range + let identity = IdentityMetadataCreateParams { + name: "multicast-pool".parse().unwrap(), + description: "Test multicast IP pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &identity, + IpVersion::V4, + IpPoolReservationType::ExternalSilos, + ), + ) + .await + .expect("Should create multicast IP pool"); + + let authz_pool = authz::IpPool::new( + authz::FLEET, + pool.id(), + LookupType::ById(pool.id()), + ); + + // Link pool to silo + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: false, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Should link pool to silo"); + + // Add multicast range 224.1.1.0 - 224.1.1.255 + let range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 1, 1, 0), + Ipv4Addr::new(224, 1, 1, 255), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range(&opctx, &authz_pool, &pool, &range) + .await + .expect("Should add range to pool"); + + // Case: IP within range should find the pool + let ip_in_range = IpAddr::V4(Ipv4Addr::new(224, 1, 1, 100)); + let result = datastore + .ip_pool_containing_multicast_ip(&opctx, ip_in_range) + .await + .expect("Query should succeed"); + assert!(result.is_some(), "Should find pool containing 224.1.1.100"); + assert_eq!(result.unwrap().id(), pool.id()); + + // Case: IP at range start should find the pool + let ip_start = IpAddr::V4(Ipv4Addr::new(224, 1, 1, 0)); + let result = datastore + .ip_pool_containing_multicast_ip(&opctx, ip_start) + .await + .expect("Query should succeed"); + assert!(result.is_some(), "Should find pool containing 224.1.1.0"); + + // Case: IP at range end should find the pool + let ip_end = IpAddr::V4(Ipv4Addr::new(224, 1, 1, 255)); + let result = datastore + .ip_pool_containing_multicast_ip(&opctx, ip_end) + .await + .expect("Query should succeed"); + assert!(result.is_some(), "Should find pool containing 
224.1.1.255"); + + // Case: IP outside range should return None + let ip_outside = IpAddr::V4(Ipv4Addr::new(224, 2, 1, 1)); + let result = datastore + .ip_pool_containing_multicast_ip(&opctx, ip_outside) + .await + .expect("Query should succeed"); + assert!(result.is_none(), "Should not find pool for 224.2.1.1"); + + // Case: Non-multicast IP should return None + let non_multicast = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)); + let result = datastore + .ip_pool_containing_multicast_ip(&opctx, non_multicast) + .await + .expect("Query should succeed"); + assert!(result.is_none(), "Should not find pool for non-multicast IP"); + + db.terminate().await; + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/multicast/groups.rs b/nexus/db-queries/src/db/datastore/multicast/groups.rs index 090fd4a4608..74859f4f2bd 100644 --- a/nexus/db-queries/src/db/datastore/multicast/groups.rs +++ b/nexus/db-queries/src/db/datastore/multicast/groups.rs @@ -21,13 +21,13 @@ use diesel::result::{ }; use ipnetwork::IpNetwork; use ref_cast::RefCast; -use slog::{error, info}; +use slog::{debug, error, info}; use uuid::Uuid; use nexus_db_errors::{ErrorHandler, public_error_from_diesel}; use nexus_db_lookup::DbConnection; -use nexus_types::external_api::params; use nexus_types::identity::Resource; +use nexus_types::multicast::MulticastGroupCreate; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ self, CreateResult, DataPageParams, DeleteResult, @@ -41,10 +41,9 @@ use crate::authz; use crate::context::OpContext; use crate::db::datastore::DataStore; use crate::db::model::{ - ExternalMulticastGroup, ExternalMulticastGroupUpdate, - IncompleteExternalMulticastGroup, IncompleteExternalMulticastGroupParams, - IpPoolType, MulticastGroup, MulticastGroupState, Name, - UnderlayMulticastGroup, Vni, + ExternalMulticastGroup, IncompleteExternalMulticastGroup, + IncompleteExternalMulticastGroupParams, IpPoolType, MulticastGroup, + 
MulticastGroupState, Name, UnderlayMulticastGroup, Vni, }; use crate::db::pagination::paginated; use crate::db::queries::external_multicast_group::NextExternalMulticastGroup; @@ -63,7 +62,8 @@ pub(crate) struct MulticastGroupAllocationParams { impl DataStore { /// List multicast groups by state. /// - /// Used by RPW reconciler. + /// Used by RPW reconciler. For "Deleting" state, this includes groups with + /// `time_deleted` set so the RPW can clean them up. pub async fn multicast_groups_list_by_state( &self, opctx: &OpContext, @@ -72,21 +72,58 @@ impl DataStore { ) -> ListResultVec { use nexus_db_schema::schema::multicast_group::dsl; - paginated(dsl::multicast_group, dsl::id, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::state.eq(state)) + let mut query = paginated(dsl::multicast_group, dsl::id, pagparams) + .filter(dsl::state.eq(state)); + + if state != MulticastGroupState::Deleting { + query = query.filter(dsl::time_deleted.is_null()); + } + + query .select(MulticastGroup::as_select()) .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Set multicast group state. - pub async fn multicast_group_set_state( + /// List multicast groups matching any of the provided states. + /// + /// Used by RPW reconciler. For "Deleting" state, includes groups with + /// `time_deleted` set so the RPW can clean them up. 
+    pub async fn multicast_groups_list_by_states(
+        &self,
+        opctx: &OpContext,
+        states: &[MulticastGroupState],
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<MulticastGroup> {
+        use nexus_db_schema::schema::multicast_group::dsl;
+
+        let mut query = paginated(dsl::multicast_group, dsl::id, pagparams)
+            .filter(dsl::state.eq_any(states.to_vec()));
+
+        if !states.contains(&MulticastGroupState::Deleting) {
+            query = query.filter(dsl::time_deleted.is_null());
+        }
+
+        query
+            .select(MulticastGroup::as_select())
+            .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Transition multicast group to "Active" state.
+    ///
+    /// This is used after successfully programming the dataplane (DPD) to mark
+    /// the group as fully operational.
+    ///
+    /// Note: this is the only valid state transition via this API. To delete a
+    /// group, use [`Self::mark_multicast_group_for_removal_if_no_members`] which
+    /// handles the "Deleting" state transition along with setting `time_deleted`.
+    pub async fn multicast_group_set_active(
         &self,
         opctx: &OpContext,
         group_id: MulticastGroupUuid,
-        new_state: MulticastGroupState,
     ) -> UpdateResult<()> {
         use nexus_db_schema::schema::multicast_group::dsl;
 
@@ -94,7 +131,7 @@
             .filter(dsl::id.eq(group_id.into_untyped_uuid()))
             .filter(dsl::time_deleted.is_null())
             .set((
-                dsl::state.eq(new_state),
+                dsl::state.eq(MulticastGroupState::Active),
                 dsl::time_modified.eq(diesel::dsl::now),
             ))
             .execute_async(&*self.pool_connection_authorized(opctx).await?)
@@ -118,7 +155,7 @@
     pub async fn multicast_group_create(
         &self,
         opctx: &OpContext,
-        params: &params::MulticastGroupCreate,
+        params: &MulticastGroupCreate,
         authz_pool: Option<authz::IpPool>,
     ) -> CreateResult<ExternalMulticastGroup> {
         self.allocate_external_multicast_group(
@@ -198,7 +235,7 @@
         })
     }
 
-    /// List multicast groups (fleet-wide).
+    /// List multicast groups (fleet-scoped for visibility).
     pub async fn multicast_groups_list(
         &self,
         opctx: &OpContext,
@@ -223,91 +260,61 @@
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
-    /// Update a multicast group.
-    pub async fn multicast_group_update(
-        &self,
-        opctx: &OpContext,
-        group_id: MulticastGroupUuid,
-        params: &params::MulticastGroupUpdate,
-    ) -> UpdateResult<ExternalMulticastGroup> {
-        use nexus_db_schema::schema::multicast_group::dsl;
-
-        // Create update struct with mvlan=None (won't update field)
-        let mut update = ExternalMulticastGroupUpdate::from(params.clone());
-
-        // Handle mvlan manually like VpcSubnetUpdate handles custom_router_id
-        // - None: leave as None (don't update field)
-        // - Some(Nullable(Some(v))): set to update field to value
-        // - Some(Nullable(None)): set to update field to NULL
-        if let Some(mvlan) = &params.mvlan {
-            update.mvlan = Some(mvlan.0.map(|vlan| u16::from(vlan) as i16));
-        }
-
-        diesel::update(dsl::multicast_group)
-            .filter(dsl::id.eq(group_id.into_untyped_uuid()))
-            .filter(dsl::time_deleted.is_null())
-            .set(update)
-            .returning(ExternalMulticastGroup::as_returning())
-            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel(
-                    e,
-                    ErrorHandler::NotFoundByLookup(
-                        ResourceType::MulticastGroup,
-                        LookupType::ById(group_id.into_untyped_uuid()),
-                    ),
-                )
-            })
-    }
-
-    /// Mark a multicast group for deletion by transitioning to "DELETING" state.
+    /// Mark a multicast group for deletion, but only if it has no active members.
     ///
-    /// Unlike members (which use `time_deleted` to distinguish temporary vs
-    /// permanent removal), groups use a simpler model:
-    /// - "DELETING" state = permanent removal in progress
-    /// - RPW reconciler handles cleanup then removes the row entirely
-    /// - `time_deleted` is only set as final step before row deletion
+    /// This is a safe implicit deletion method. It atomically checks that no members
+    /// exist before marking the group as "Deleting". This prevents race conditions
+    /// where a concurrent join could create a member between a "list members"
+    /// check and the mark-for-removal call.
     ///
-    /// The group remains visible in queries until the reconciler completes
-    /// cleanup and hard-deletes the row.
-    pub async fn mark_multicast_group_for_removal(
+    /// Returns:
+    /// - `Ok(true)` if the group was marked for deletion (no members existed)
+    /// - `Ok(false)` if the group still has members (not marked)
+    /// - `Err` on database errors
+    pub async fn mark_multicast_group_for_removal_if_no_members(
         &self,
         opctx: &OpContext,
         group_id: MulticastGroupUuid,
-    ) -> DeleteResult {
-        use nexus_db_schema::schema::multicast_group::dsl;
+    ) -> Result<bool, external::Error> {
+        use nexus_db_schema::schema::multicast_group;
+        use nexus_db_schema::schema::multicast_group_member;
 
         let now = Utc::now();
-        diesel::update(dsl::multicast_group)
-            .filter(dsl::id.eq(group_id.into_untyped_uuid()))
+        // Atomic: only mark `Deleting` if no active members exist.
+        let rows = diesel::update(multicast_group::table)
+            .filter(multicast_group::id.eq(group_id.into_untyped_uuid()))
             .filter(
-                dsl::state
+                multicast_group::state
                     .eq(MulticastGroupState::Active)
-                    .or(dsl::state.eq(MulticastGroupState::Creating)),
-            )
-            .filter(dsl::time_deleted.is_null())
+                    .or(multicast_group::state
+                        .eq(MulticastGroupState::Creating)),
+            )
+            .filter(multicast_group::time_deleted.is_null())
+            .filter(diesel::dsl::not(diesel::dsl::exists(
+                multicast_group_member::table
+                    .filter(
+                        multicast_group_member::external_group_id
+                            .eq(group_id.into_untyped_uuid()),
+                    )
+                    .filter(multicast_group_member::time_deleted.is_null()),
+            )))
             .set((
-                dsl::state.eq(MulticastGroupState::Deleting),
-                dsl::time_modified.eq(now),
+                multicast_group::state.eq(MulticastGroupState::Deleting),
+                multicast_group::time_deleted.eq(now),
+                multicast_group::time_modified.eq(now),
             ))
-            .returning(ExternalMulticastGroup::as_returning())
-            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+            .execute_async(&*self.pool_connection_authorized(opctx).await?)
            .await
-            .map_err(|e| {
-                public_error_from_diesel(
-                    e,
-                    ErrorHandler::NotFoundByLookup(
-                        ResourceType::MulticastGroup,
-                        LookupType::ById(group_id.into_untyped_uuid()),
-                    ),
-                )
-            })?;
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
 
-        Ok(())
+        Ok(rows > 0)
     }
 
     /// Delete a multicast group permanently.
+    ///
+    /// This should only be called by the RPW reconciler after DPD cleanup.
+    /// Requires both `state=Deleting` and `time_deleted IS NOT NULL` as a
+    /// safety check.
     pub async fn multicast_group_delete(
         &self,
         opctx: &OpContext,
@@ -315,45 +322,114 @@
     ) -> DeleteResult {
         use nexus_db_schema::schema::multicast_group::dsl;
 
-        diesel::delete(dsl::multicast_group)
+        let deleted_rows = diesel::delete(dsl::multicast_group)
             .filter(dsl::id.eq(group_id.into_untyped_uuid()))
+            .filter(dsl::state.eq(MulticastGroupState::Deleting))
+            .filter(dsl::time_deleted.is_not_null())
             .execute_async(&*self.pool_connection_authorized(opctx).await?)
             .await
-            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
-            .map(|_| ())
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        if deleted_rows == 0 {
+            return Err(external::Error::not_found_by_id(
+                ResourceType::MulticastGroup,
+                &group_id.into_untyped_uuid(),
+            ));
+        }
+
+        Ok(())
     }
 
     /// Allocate an external multicast group from an IP Pool.
     ///
-    /// See [`Self::allocate_external_multicast_group_on_conn`] for the connection-reusing variant.
+    /// See [`Self::allocate_external_multicast_group_on_conn`] for the
+    /// connection-reusing variant.
pub(crate) async fn allocate_external_multicast_group( &self, opctx: &OpContext, params: MulticastGroupAllocationParams, ) -> CreateResult { let group_id = Uuid::new_v4(); - let authz_pool = self - .resolve_pool_for_allocation( + + // Determine if this is an SSM request (source_ips provided) or an + // implicit ASM request (no sources, no explicit pool/IP) + let sources_empty = + params.source_ips.as_ref().map(|v| v.is_empty()).unwrap_or(true); + let needs_ssm_pool = + !sources_empty && params.pool.is_none() && params.ip.is_none(); + let needs_asm_pool = + sources_empty && params.pool.is_none() && params.ip.is_none(); + + // Select the appropriate pool: + // - If `source_ips` provided without explicit pool/IP, find an SSM pool. + // - If no `source_ips` and no explicit pool/IP, find an ASM pool. + // - Otherwise (explicit pool or explicit IP provided), fall back to + // generic resolution via `resolve_pool_for_allocation` which validates + // linkage and type. ASM/SSM semantics are still enforced below. + let authz_pool = if needs_ssm_pool { + let (authz_pool, _) = self + .ip_pools_fetch_ssm_multicast(opctx) + .await + .map_err(|_| { + external::Error::invalid_request(concat!( + "No SSM multicast pool linked to your silo. ", + "Create a multicast pool with SSM ranges ", + "(IPv4 232/8, IPv6 ff3x::/32) and link it to ", + "your silo, or provide an explicit SSM address.", + )) + })?; + opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; + authz_pool + } else if needs_asm_pool { + let (authz_pool, _) = self + .ip_pools_fetch_asm_multicast(opctx) + .await + .map_err(|_| { + external::Error::invalid_request(concat!( + "No ASM multicast pool linked to your silo. 
", + "Create a multicast pool with ASM ranges ", + "(IPv4 224/4 excluding 232/8, or IPv6 ffxx::/16 ", + "excluding ff3x::/32) and link it to your silo, ", + "or provide an explicit ASM address.", + )) + })?; + opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; + authz_pool + } else { + self.resolve_pool_for_allocation( opctx, params.pool, IpPoolType::Multicast, ) - .await?; + .await? + }; + + debug!( + opctx.log, + "multicast group allocation"; + "pool_selection" => if needs_ssm_pool { "ssm" } else if needs_asm_pool { "asm" } else { "explicit" }, + "pool_id" => %authz_pool.id(), + ); // Enforce ASM/SSM semantics when allocating from a pool: // - If sources are provided without an explicit IP (implicit allocation), // the pool must be SSM so we allocate an SSM address. // - If the pool is SSM and sources are empty/missing, reject. - let sources_empty = - params.source_ips.as_ref().map(|v| v.is_empty()).unwrap_or(true); - let pool_is_ssm = self.multicast_pool_is_ssm(opctx, authz_pool.id()).await?; + // Note: When needs_ssm_pool was true, we already fetched an SSM pool, + // so this check only triggers for explicitly-provided pools. if !sources_empty && params.ip.is_none() && !pool_is_ssm { let pool_id = authz_pool.id(); return Err(external::Error::invalid_request(&format!( - "Cannot allocate SSM multicast group from ASM pool {pool_id}. Choose a multicast pool with SSM ranges (IPv4 232/8, IPv6 FF3x::/32) or provide an explicit SSM address." + concat!( + "Cannot allocate SSM multicast group from ASM pool {}. ", + "Choose a multicast pool with SSM ranges ", + "(IPv4 232/8, IPv6 ff3x::/32) or provide an explicit ", + "SSM address." + ), + pool_id ))); } @@ -376,7 +452,7 @@ impl DataStore { // Fleet-scoped multicast groups always use DEFAULT_MULTICAST_VNI (77). // This reserved VNI is below MIN_GUEST_VNI (1024) and provides consistent // behavior across all multicast groups. 
VNI is not derived from VPC since - // groups are fleet-wide and can span multiple projects/VPCs. + // groups are fleet-scoped and can span multiple projects/VPCs. let vni = Vni(external::Vni::DEFAULT_MULTICAST_VNI); // Create the incomplete group @@ -390,9 +466,9 @@ impl DataStore { source_ips: source_ip_networks, mvlan: params.mvlan.map(|vlan_id| u16::from(vlan_id) as i16), vni, - // Set DPD tag to the group name to couple overlay/underlay entries - // for this multicast group (kept in sync on rename) - tag: Some(params.identity.name.to_string()), + // Set DPD tag to the group UUID to ensure uniqueness across lifecycle. + // This prevents tag collision when group names are reused. + tag: Some(group_id.to_string()), }, ); @@ -442,7 +518,7 @@ impl DataStore { /// Deallocate an external multicast group address for IP pool cleanup. /// /// This marks the group's IP address as deallocated by setting `time_deleted`, - /// releasing it back to the pool. This is NOT the user-initiated deletion path. + /// releasing it back to the pool. This is not the user-initiated deletion path. /// /// User-initiated deletion uses `mark_multicast_group_for_removal` which /// transitions to "Deleting" state for RPW cleanup before row removal. @@ -627,9 +703,9 @@ impl DataStore { /// Delete an underlay multicast group permanently. /// - /// This immediately removes the underlay group record from the database. It - /// sho¨ld only be called when the group is already removed from the switch - /// or when cleaning up failed operations. + /// This should only be called by the RPW reconciler after DPD cleanup. + /// Underlay groups don't have independent lifecycle, i.e. they're always + /// deleted as part of cleaning up their parent external group. 
pub async fn underlay_multicast_group_delete( &self, opctx: &OpContext, @@ -637,12 +713,20 @@ impl DataStore { ) -> DeleteResult { use nexus_db_schema::schema::underlay_multicast_group::dsl; - diesel::delete(dsl::underlay_multicast_group) + let deleted_rows = diesel::delete(dsl::underlay_multicast_group) .filter(dsl::id.eq(group_id)) .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - .map(|_| ()) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + if deleted_rows == 0 { + return Err(external::Error::not_found_by_id( + ResourceType::MulticastGroup, + &group_id, + )); + } + + Ok(()) } } @@ -654,9 +738,6 @@ mod tests { use nexus_types::identity::Resource; use omicron_common::address::{IpRange, Ipv4Range}; - use omicron_common::api::external::{ - IdentityMetadataUpdateParams, NameOrId, - }; use omicron_test_utils::dev; use omicron_uuid_kinds::{ GenericUuid, InstanceUuid, PropolisUuid, SledUuid, @@ -738,14 +819,13 @@ mod tests { .expect("Should link multicast pool to silo"); // Allocate first address - let params1 = params::MulticastGroupCreate { + let params1 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "first-group".parse().unwrap(), description: "First group".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("exhaust-pool".parse().unwrap())), mvlan: None, }; datastore @@ -754,14 +834,13 @@ mod tests { .expect("Should create first group"); // Allocate second address - let params2 = params::MulticastGroupCreate { + let params2 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "second-group".parse().unwrap(), description: "Second group".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("exhaust-pool".parse().unwrap())), mvlan: None, }; datastore @@ -770,14 +849,13 @@ mod tests { .expect("Should create second group"); // Third allocation should fail due 
to exhaustion - let params3 = params::MulticastGroupCreate { + let params3 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "third-group".parse().unwrap(), description: "Should fail".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("exhaust-pool".parse().unwrap())), mvlan: None, }; let result3 = datastore @@ -844,14 +922,13 @@ mod tests { .expect("Should link multicast pool to silo"); // Create group without specifying pool (should use default) - let params_default = params::MulticastGroupCreate { + let params_default = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "auto-alloc-group".parse().unwrap(), description: "Group using default pool".to_string(), }, multicast_ip: None, source_ips: None, - pool: None, // No pool specified - should use default mvlan: None, }; @@ -870,16 +947,13 @@ mod tests { ); // Create group with explicit pool name - let params_explicit = params::MulticastGroupCreate { + let params_explicit = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "explicit-alloc-group".parse().unwrap(), description: "Group with explicit pool".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name( - "default-multicast-pool".parse().unwrap(), - )), mvlan: None, }; let group_explicit = datastore @@ -898,10 +972,9 @@ mod tests { // Test state transitions on the default pool group datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(group_default.id()), - MulticastGroupState::Active, ) .await .expect("Should transition default group to 'Active'"); @@ -1001,14 +1074,13 @@ mod tests { .expect("Should link multicast pool to silo"); // Create external multicast group with explicit address - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "test-group".parse().unwrap(), description: "Comprehensive test 
group".to_string(), }, multicast_ip: Some("224.1.3.3".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("test-multicast-pool".parse().unwrap())), mvlan: None, }; @@ -1099,14 +1171,13 @@ mod tests { create_project(&opctx, &datastore, "test-project").await; // Create a multicast group using the real project - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "parent-id-test-group".parse().unwrap(), description: "Group for parent_id testing".to_string(), }, multicast_ip: Some("224.3.1.5".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("parent-id-test-pool".parse().unwrap())), mvlan: None, }; @@ -1266,10 +1337,9 @@ mod tests { // Transition group to "Active" state before adding members datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Active, ) .await .expect("Should transition group to 'Active' state"); @@ -1566,14 +1636,13 @@ mod tests { .await .expect("Should set instance runtime state"); - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "duplicate-test-group".parse().unwrap(), description: "Group for duplicate testing".to_string(), }, multicast_ip: Some("224.3.1.5".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("duplicate-test-pool".parse().unwrap())), mvlan: None, }; @@ -1584,10 +1653,9 @@ mod tests { // Transition group to "Active" state before adding members datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Active, ) .await .expect("Should transition group to 'Active' state"); @@ -1699,7 +1767,7 @@ mod tests { .expect("Should link pool to silo"); // Create multicast group (datastore-only; not exercising reconciler) - let group_params = 
params::MulticastGroupCreate { + let group_params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "state-test-group".parse().unwrap(), description: "Group for testing member state transitions" @@ -1707,7 +1775,6 @@ mod tests { }, multicast_ip: None, // Let it allocate from pool source_ips: None, - pool: Some(NameOrId::Name("state-test-pool".parse().unwrap())), mvlan: None, }; let group = datastore @@ -1735,10 +1802,9 @@ mod tests { // Transition group to "Active" state before adding members datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Active, ) .await .expect("Should transition group to 'Active' state"); @@ -1756,7 +1822,7 @@ mod tests { assert_eq!(member.state, MulticastGroupMemberState::Joining); assert_eq!(member.parent_id, test_instance_id); - // Test: Transition from "Joining" → "Joined" (simulating what the reconciler would do) + // Case: Transition from "Joining" → "Joined" (simulating what the reconciler would do) datastore .multicast_group_member_set_state( &opctx, @@ -1786,7 +1852,7 @@ mod tests { assert_eq!(members.len(), 1); assert_eq!(members[0].state, MulticastGroupMemberState::Joined); - // Test: Transition member to "Left" state (without permanent deletion) + // Case: Transition member to "Left" state (without permanent deletion) datastore .multicast_group_member_set_state( &opctx, @@ -1914,14 +1980,13 @@ mod tests { // Create group with specific IP let target_ip = "224.10.1.101".parse().unwrap(); - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "reuse-test".parse().unwrap(), description: "Group for IP reuse test".to_string(), }, multicast_ip: Some(target_ip), source_ips: None, - pool: Some(NameOrId::Name("reuse-test-pool".parse().unwrap())), mvlan: None, }; @@ -1942,14 +2007,13 @@ mod tests { assert_eq!(deleted, true, "Should successfully 
deallocate the group"); // Create another group with the same IP - should succeed due to time_deleted filtering - let params2 = params::MulticastGroupCreate { + let params2 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "reuse-test-2".parse().unwrap(), description: "Second group reusing same IP".to_string(), }, multicast_ip: Some(target_ip), source_ips: None, - pool: Some(NameOrId::Name("reuse-test-pool".parse().unwrap())), mvlan: None, }; @@ -2028,14 +2092,13 @@ mod tests { .expect("Should link pool to silo"); // Exhaust the pool - let params1 = params::MulticastGroupCreate { + let params1 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "cycle-test-1".parse().unwrap(), description: "First group to exhaust pool".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("cycle-test-pool".parse().unwrap())), mvlan: None, }; @@ -2046,14 +2109,13 @@ mod tests { let allocated_ip = group1.multicast_ip.ip(); // Try to create another group - should fail due to exhaustion - let params2 = params::MulticastGroupCreate { + let params2 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "cycle-test-2".parse().unwrap(), description: "Second group should fail".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("cycle-test-pool".parse().unwrap())), mvlan: None, }; @@ -2076,7 +2138,7 @@ mod tests { assert_eq!(deleted, true, "Should successfully deallocate the group"); // Now creating a new group should succeed - let params3 = params::MulticastGroupCreate { + let params3 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "cycle-test-3".parse().unwrap(), description: "Third group should succeed after deletion" @@ -2084,7 +2146,6 @@ mod tests { }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("cycle-test-pool".parse().unwrap())), mvlan: None, }; @@ -2164,14 +2225,13 @@ mod tests { .expect("Should link pool to silo"); // 
Create a group - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "dealloc-test".parse().unwrap(), description: "Group for deallocation testing".to_string(), }, multicast_ip: None, source_ips: None, - pool: Some(NameOrId::Name("dealloc-test-pool".parse().unwrap())), mvlan: None, }; @@ -2289,7 +2349,7 @@ mod tests { .expect("Should link multicast pool to silo"); // Test creating a multicast group - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "fetch-test-group".parse().unwrap(), description: "Test group for fetch operations".to_string(), @@ -2299,7 +2359,6 @@ mod tests { "10.0.0.1".parse().unwrap(), "10.0.0.2".parse().unwrap(), ]), - pool: Some(NameOrId::Name("fetch-test-pool".parse().unwrap())), mvlan: None, }; @@ -2399,41 +2458,38 @@ mod tests { .await .expect("Should link multicast pool to silo"); - // Create fleet-wide multicast groups - let params_1 = params::MulticastGroupCreate { + // Create fleet-scoped multicast groups + let params_1 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "fleet-group-1".parse().unwrap(), description: "Fleet-wide group 1".to_string(), }, multicast_ip: Some("224.100.20.10".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("list-test-pool".parse().unwrap())), mvlan: None, }; - let params_2 = params::MulticastGroupCreate { + let params_2 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "fleet-group-2".parse().unwrap(), description: "Fleet-wide group 2".to_string(), }, multicast_ip: Some("224.100.20.11".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("list-test-pool".parse().unwrap())), mvlan: None, }; - let params_3 = params::MulticastGroupCreate { + let params_3 = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "fleet-group-3".parse().unwrap(), description: "Fleet-wide 
group 3".to_string(), }, multicast_ip: Some("224.100.20.12".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("list-test-pool".parse().unwrap())), mvlan: None, }; - // Create groups (all are fleet-wide) + // Create groups (all are fleet-scoped) datastore .multicast_group_create(&opctx, ¶ms_1, Some(authz_pool.clone())) .await @@ -2449,7 +2505,7 @@ mod tests { .await .expect("Should create fleet-group-3"); - // List all groups fleet-wide - should get 3 groups + // List all groups (fleet-scoped) - should get 3 groups let pagparams = DataPageParams { marker: None, direction: external::PaginationOrder::Ascending, @@ -2461,9 +2517,9 @@ mod tests { let groups = datastore .multicast_groups_list(&opctx, &paginated_by) .await - .expect("Should list all fleet-wide groups"); + .expect("Should list all fleet-scoped groups"); - assert_eq!(groups.len(), 3, "Should have 3 fleet-wide groups"); + assert_eq!(groups.len(), 3, "Should have 3 fleet-scoped groups"); // Verify the groups have the correct names let group_names: Vec<_> = @@ -2531,14 +2587,13 @@ mod tests { .await .expect("Should link multicast pool to silo"); - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "state-test-group".parse().unwrap(), description: "Test group for state transitions".to_string(), }, multicast_ip: Some("224.100.30.5".parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name("state-test-pool".parse().unwrap())), mvlan: None, }; @@ -2552,10 +2607,9 @@ mod tests { // Test transition to "Active" datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Active, ) .await .expect("Should transition to 'Active'"); @@ -2571,32 +2625,39 @@ mod tests { assert_eq!(updated_group.state, MulticastGroupState::Active); // Test transition to "Deleting" - datastore - .multicast_group_set_state( + // Since this group has no 
members, it should be marked for deletion + let marked = datastore + .mark_multicast_group_for_removal_if_no_members( &opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Deleting, ) .await .expect("Should transition to 'Deleting'"); + assert!(marked, "Group with no members should be marked for deletion"); - let deleting_group = datastore - .multicast_group_fetch( + // Note: After marking for removal, group has `time_deleted` set, + // so it won't show up in regular fetch (which filters `time_deleted IS NULL`). + // We can verify by listing groups in Deleting state which includes deleted groups. + let deleting_groups = datastore + .multicast_groups_list_by_state( &opctx, - MulticastGroupUuid::from_untyped_uuid(group.id()), + MulticastGroupState::Deleting, + &DataPageParams::max_page(), ) .await - .expect("Should fetch deleting group"); + .expect("Should list deleting groups"); - assert_eq!(deleting_group.state, MulticastGroupState::Deleting); + assert_eq!(deleting_groups.len(), 1); + assert_eq!(deleting_groups[0].id(), group.id()); + assert_eq!(deleting_groups[0].state, MulticastGroupState::Deleting); + assert!(deleting_groups[0].time_deleted().is_some()); // Test trying to update non-existent group let fake_id = Uuid::new_v4(); let result = datastore - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(fake_id), - MulticastGroupState::Active, ) .await; assert!(result.is_err()); @@ -2729,163 +2790,4 @@ mod tests { db.terminate().await; logctx.cleanup_successful(); } - - #[tokio::test] - async fn test_multicast_group_update() { - let logctx = dev::test_setup_log("test_multicast_group_update"); - let db = TestDatabase::new_with_datastore(&logctx.log).await; - let (opctx, datastore) = (db.opctx(), db.datastore()); - - // Create test setup - let setup = multicast::create_test_setup( - &opctx, - &datastore, - "test-pool", - "test-project", - ) - .await; - - // Create initial multicast 
group - let group = multicast::create_test_group( - &opctx, - &datastore, - &setup, - "original-group", - "224.10.1.100", - ) - .await; - - // Verify original values - assert_eq!(group.name().as_str(), "original-group"); - assert_eq!(group.description(), "Test group: original-group"); - assert_eq!(group.source_ips.len(), 0); // Empty array initially - - // Test updating name and description - let update_params = params::MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some("updated-group".parse().unwrap()), - description: Some("Updated group description".to_string()), - }, - source_ips: None, - mvlan: None, - }; - - let updated_group = datastore - .multicast_group_update( - &opctx, - MulticastGroupUuid::from_untyped_uuid(group.id()), - &update_params, - ) - .await - .expect("Should update multicast group"); - - // Verify updated identity fields - assert_eq!(updated_group.name().as_str(), "updated-group"); - assert_eq!(updated_group.description(), "Updated group description"); - assert_eq!(updated_group.id(), group.id()); // ID should not change - assert_eq!(updated_group.multicast_ip, group.multicast_ip); // IP should not change - assert!(updated_group.time_modified() > group.time_modified()); // Modified time should advance - - // Test updating source IPs (Source-Specific Multicast) - let source_ip_update = params::MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec![ - "10.1.1.10".parse().unwrap(), - "10.1.1.20".parse().unwrap(), - ]), - mvlan: None, - }; - - let group_with_sources = datastore - .multicast_group_update( - &opctx, - MulticastGroupUuid::from_untyped_uuid(updated_group.id()), - &source_ip_update, - ) - .await - .expect("Should update source IPs"); - - // Verify source IPs were updated - assert_eq!(group_with_sources.source_ips.len(), 2); - let source_addrs: Vec<_> = - group_with_sources.source_ips.iter().map(|ip| ip.ip()).collect(); - 
assert!(source_addrs.contains(&"10.1.1.10".parse().unwrap())); - assert!(source_addrs.contains(&"10.1.1.20".parse().unwrap())); - - // Test updating all fields at once - let complete_update = params::MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some("final-group".parse().unwrap()), - description: Some("Final group description".to_string()), - }, - source_ips: Some(vec!["192.168.1.1".parse().unwrap()]), - mvlan: None, - }; - - let final_group = datastore - .multicast_group_update( - &opctx, - MulticastGroupUuid::from_untyped_uuid(group_with_sources.id()), - &complete_update, - ) - .await - .expect("Should update all fields"); - - assert_eq!(final_group.name().as_str(), "final-group"); - assert_eq!(final_group.description(), "Final group description"); - assert_eq!(final_group.source_ips.len(), 1); - assert_eq!( - final_group.source_ips[0].ip(), - "192.168.1.1".parse::().unwrap() - ); - - // Test updating nonexistent group - should fail - let nonexistent_id = MulticastGroupUuid::new_v4(); - let failed_update = datastore - .multicast_group_update(&opctx, nonexistent_id, &update_params) - .await; - - assert!(failed_update.is_err()); - match failed_update.err().unwrap() { - Error::ObjectNotFound { .. } => { - // Expected error for nonexistent group - } - other => panic!("Expected ObjectNotFound error, got: {:?}", other), - } - - // Test updating deleted group - should fail - // First soft-delete the group (sets time_deleted) - datastore - .deallocate_external_multicast_group( - &opctx, - MulticastGroupUuid::from_untyped_uuid(final_group.id()), - ) - .await - .expect("Should soft-delete group"); - - let deleted_update = datastore - .multicast_group_update( - &opctx, - MulticastGroupUuid::from_untyped_uuid(final_group.id()), - &update_params, - ) - .await; - - assert!(deleted_update.is_err()); - match deleted_update.err().unwrap() { - Error::ObjectNotFound { .. 
} => { - // Expected - soft-deleted groups should not be updatable - } - other => panic!( - "Expected ObjectNotFound error for deleted group, got: {:?}", - other - ), - } - - db.terminate().await; - logctx.cleanup_successful(); - } } diff --git a/nexus/db-queries/src/db/datastore/multicast/members.rs b/nexus/db-queries/src/db/datastore/multicast/members.rs index 21a80446732..6a74525ea0e 100644 --- a/nexus/db-queries/src/db/datastore/multicast/members.rs +++ b/nexus/db-queries/src/db/datastore/multicast/members.rs @@ -56,11 +56,32 @@ impl DataStore { group_id: MulticastGroupUuid, instance_id: InstanceUuid, ) -> CreateResult { + use nexus_db_schema::schema::multicast_group::dsl; + let conn = self.pool_connection_authorized(opctx).await?; + + // Fetch the group's multicast_ip + let group_multicast_ip: ipnetwork::IpNetwork = dsl::multicast_group + .filter(dsl::id.eq(group_id.into_untyped_uuid())) + .filter(dsl::time_deleted.is_null()) + .select(dsl::multicast_ip) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::MulticastGroup, + LookupType::ById(group_id.into_untyped_uuid()), + ), + ) + })?; + self.multicast_group_member_add_with_conn( opctx, &conn, group_id.into_untyped_uuid(), + group_multicast_ip, instance_id.into_untyped_uuid(), ) .await @@ -83,6 +104,7 @@ impl DataStore { opctx: &OpContext, conn: &async_bb8_diesel::Connection, group_id: Uuid, + multicast_ip: ipnetwork::IpNetwork, instance_id: Uuid, ) -> CreateResult { use nexus_db_schema::schema::multicast_group_member::dsl; @@ -123,6 +145,7 @@ impl DataStore { id: Uuid::new_v4(), parent_id: instance_id, external_group_id: group_id, + multicast_ip, sled_id, state: MulticastGroupMemberState::Joining, time_created: Utc::now(), @@ -331,24 +354,16 @@ impl DataStore { /// List multicast group memberships for a specific instance. 
/// - /// If `include_removed` is true, includes memberships that have been - /// marked removed (i.e., rows with `time_deleted` set). Otherwise only - /// returns active memberships. + /// Only returns active (non-deleted) memberships. pub async fn multicast_group_members_list_by_instance( &self, opctx: &OpContext, instance_id: InstanceUuid, - include_removed: bool, ) -> ListResultVec { use nexus_db_schema::schema::multicast_group_member::dsl; - let mut query = dsl::multicast_group_member.into_boxed(); - - if !include_removed { - query = query.filter(dsl::time_deleted.is_null()); - } - - query + dsl::multicast_group_member + .filter(dsl::time_deleted.is_null()) .filter(dsl::parent_id.eq(instance_id.into_untyped_uuid())) .order(dsl::id.asc()) .select(MulticastGroupMember::as_select()) @@ -762,7 +777,11 @@ impl DataStore { .map(|_| ()) } - /// Permanently delete a multicast group member by ID. + /// Mark a multicast group member for deletion by ID. + /// + /// This performs a soft delete by setting the member to "Left" state and + /// setting `time_deleted`. The RPW reconciler will remove the member from + /// DPD, and later cleanup will hard-delete the database record. pub async fn multicast_group_member_delete_by_id( &self, opctx: &OpContext, @@ -770,13 +789,22 @@ impl DataStore { ) -> DeleteResult { use nexus_db_schema::schema::multicast_group_member::dsl; - let deleted_rows = diesel::delete(dsl::multicast_group_member) + let now = Utc::now(); + + let updated_rows = diesel::update(dsl::multicast_group_member) .filter(dsl::id.eq(member_id)) + .filter(dsl::time_deleted.is_null()) + .set(( + dsl::state.eq(MulticastGroupMemberState::Left), + dsl::sled_id.eq(Option::>::None), + dsl::time_deleted.eq(Some(now)), + dsl::time_modified.eq(now), + )) .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - if deleted_rows == 0 { + if updated_rows == 0 { return Err(external::Error::not_found_by_id( ResourceType::MulticastGroupMember, &member_id, @@ -785,9 +813,9 @@ impl DataStore { debug!( opctx.log, - "multicast group member deletion completed"; + "multicast group member marked for deletion"; "member_id" => %member_id, - "rows_deleted" => deleted_rows + "rows_updated" => updated_rows ); Ok(()) @@ -825,8 +853,8 @@ impl DataStore { mod tests { use super::*; - use nexus_types::external_api::params; use nexus_types::identity::Resource; + use nexus_types::multicast::MulticastGroupCreate; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; use omicron_uuid_kinds::SledUuid; @@ -890,7 +918,7 @@ mod tests { .await; // Create creating group manually (needs to stay in "Creating" state) - let creating_group_params = params::MulticastGroupCreate { + let creating_group_params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "creating-group".parse().unwrap(), description: "Creating test group".to_string(), @@ -898,7 +926,6 @@ mod tests { multicast_ip: Some("224.10.1.6".parse().unwrap()), source_ips: None, // Pool resolved via authz_pool argument to datastore call - pool: None, mvlan: None, }; @@ -1227,7 +1254,7 @@ mod tests { assert_eq!(member1_2.parent_id, *instance1_id); assert_eq!(member2_1.parent_id, *instance2_id); - // Detach all memberships for instance1 (transitions to Left, does NOT set time_deleted) + // Detach all memberships for instance1 (transitions to "Left", does not set time_deleted) datastore .multicast_group_members_detach_by_instance( &opctx, @@ -1236,7 +1263,7 @@ mod tests { .await .expect("Should detach all memberships for instance1"); - // Verify time_deleted was NOT set (members still exist, just in Left state) + // Verify time_deleted was not set (members still exist, just in "Left" state) let detached_member1 = 
datastore .multicast_group_member_get_by_id(&opctx, member1_1.id, false) .await @@ -1245,7 +1272,7 @@ mod tests { assert_eq!(detached_member1.state, MulticastGroupMemberState::Left); assert!( detached_member1.time_deleted.is_none(), - "detach_by_instance should NOT set time_deleted" + "detach_by_instance should not set time_deleted" ); assert!( detached_member1.sled_id.is_none(), @@ -1378,7 +1405,6 @@ mod tests { .multicast_group_members_list_by_instance( &opctx, InstanceUuid::from_untyped_uuid(*instance_id), - false, ) .await .expect("Should list memberships for instance"); @@ -1753,6 +1779,7 @@ mod tests { time_modified: Utc::now(), time_deleted: Some(Utc::now()), external_group_id: group.id(), + multicast_ip: group.multicast_ip, parent_id: instance1_id, sled_id: Some(setup.sled_id.into()), state: MulticastGroupMemberState::Left, @@ -1762,7 +1789,7 @@ mod tests { .await .expect("Should create member1 record"); - // Member 2: "Left" but no `time_deleted` (should NOT be deleted) + // Member 2: "Left" but no `time_deleted` (should not be deleted) let member2: MulticastGroupMember = diesel::insert_into(dsl::multicast_group_member) .values(MulticastGroupMemberValues { @@ -1771,6 +1798,7 @@ mod tests { time_modified: Utc::now(), time_deleted: None, external_group_id: group.id(), + multicast_ip: group.multicast_ip, parent_id: instance2_id, sled_id: Some(setup.sled_id.into()), state: MulticastGroupMemberState::Left, @@ -1780,7 +1808,7 @@ mod tests { .await .expect("Should create member2 record"); - // Member 3: "Joined" state (should NOT be deleted, even if it had time_deleted) + // Member 3: "Joined" state (should not be deleted, even if it had time_deleted) let member3: MulticastGroupMember = diesel::insert_into(dsl::multicast_group_member) .values(MulticastGroupMemberValues { @@ -1789,6 +1817,7 @@ mod tests { time_modified: Utc::now(), time_deleted: Some(Utc::now()), // Has time_deleted but is Joined, so won't be cleaned up external_group_id: group.id(), + 
multicast_ip: group.multicast_ip, parent_id: instance3_id, sled_id: Some(setup.sled_id.into()), state: MulticastGroupMemberState::Joined, @@ -2398,7 +2427,7 @@ mod tests { .expect("Member1_2 should exist"); assert!(marked_member1_2.time_deleted.is_some()); - // Verify instance2 membership is NOT marked for removal + // Verify instance2 membership is not marked for removal let unmarked_member2_1 = datastore .multicast_group_member_get_by_id(&opctx, member2_1.id, true) .await @@ -3036,7 +3065,7 @@ mod tests { .expect("First attach should succeed"); // Transition member to "Left" state and clear sled_id (simulating instance stop) - // This does NOT set time_deleted - only stopped instances can be reactivated + // This does not set time_deleted - only stopped instances can be reactivated datastore .multicast_group_members_detach_by_instance( &opctx, @@ -3054,7 +3083,7 @@ mod tests { assert_eq!(member_stopped.state, MulticastGroupMemberState::Left); assert!( member_stopped.time_deleted.is_none(), - "time_deleted should NOT be set for stopped instances" + "time_deleted should not be set for stopped instances" ); assert!(member_stopped.sled_id.is_none(), "sled_id should be cleared"); @@ -3182,7 +3211,6 @@ mod tests { .multicast_group_members_list_by_instance( &opctx, InstanceUuid::from_untyped_uuid(instance_id), - false, // include_removed = false ) .await .expect("List members should succeed"); diff --git a/nexus/db-queries/src/db/datastore/multicast/ops/member_attach.rs b/nexus/db-queries/src/db/datastore/multicast/ops/member_attach.rs index 886a223d1db..8ed4efe52bc 100644 --- a/nexus/db-queries/src/db/datastore/multicast/ops/member_attach.rs +++ b/nexus/db-queries/src/db/datastore/multicast/ops/member_attach.rs @@ -209,12 +209,16 @@ impl RunQueryDsl for AttachMemberToGroupStatement {} /// database operation. impl AttachMemberToGroupStatement { /// Generates the `active_group` CTE (checks if group exists and is active). 
+ /// + /// Returns id and multicast_ip for use in the member insert. fn push_active_group_cte<'a>( &'a self, mut out: AstPass<'_, 'a, Pg>, ) -> QueryResult<()> { use nexus_db_model::MulticastGroupState; - out.push_sql("SELECT id FROM multicast_group WHERE id = "); + out.push_sql( + "SELECT id, multicast_ip FROM multicast_group WHERE id = ", + ); out.push_bind_param::(&self.group_id)?; out.push_sql(" AND state = "); out.push_sql(super::group_state_as_sql_literal( @@ -247,7 +251,8 @@ impl AttachMemberToGroupStatement { /// /// SELECT joins with both `active_group` and `instance_sled` CTEs to: /// 1. Ensure group is active (FROM active_group) - /// 2. Retrieve instance's current sled_id (CROSS JOIN instance_sled) + /// 2. Retrieve group's multicast_ip (FROM active_group) + /// 3. Retrieve instance's current sled_id (CROSS JOIN instance_sled) /// /// ON CONFLICT clause uses partial unique index (only rows with time_deleted IS NULL): /// - Conflict only for members with time_deleted=NULL (active or stopped) @@ -257,10 +262,12 @@ impl AttachMemberToGroupStatement { &'a self, mut out: AstPass<'_, 'a, Pg>, ) -> QueryResult<()> { + // Column order matches schema: id, time_created, time_modified, + // external_group_id, multicast_ip, parent_id, sled_id, state out.push_sql( "INSERT INTO multicast_group_member (\ id, time_created, time_modified, external_group_id, \ - parent_id, sled_id, state) SELECT ", + multicast_ip, parent_id, sled_id, state) SELECT ", ); out.push_bind_param::(&self.new_member_id)?; out.push_sql(", "); @@ -269,7 +276,7 @@ impl AttachMemberToGroupStatement { out.push_bind_param::(&self.time_modified)?; out.push_sql(", "); out.push_bind_param::(&self.group_id)?; - out.push_sql(", "); + out.push_sql(", active_group.multicast_ip, "); out.push_bind_param::(&self.instance_id)?; out.push_sql(", instance_sled.sled_id, "); out.push_sql(super::member_state_as_sql_literal( diff --git a/nexus/db-queries/src/db/datastore/multicast/ops/member_reconcile.rs 
b/nexus/db-queries/src/db/datastore/multicast/ops/member_reconcile.rs index 5c837c2396f..0c42a6ed5c1 100644 --- a/nexus/db-queries/src/db/datastore/multicast/ops/member_reconcile.rs +++ b/nexus/db-queries/src/db/datastore/multicast/ops/member_reconcile.rs @@ -118,7 +118,6 @@ pub async fn reconcile_joining_member( instance_valid: bool, current_sled_id: Option>, ) -> Result { - // First, read the current member state let member_opt: Option = dsl::multicast_group_member .filter(dsl::external_group_id.eq(group_id)) .filter(dsl::parent_id.eq(instance_id)) diff --git a/nexus/db-queries/src/db/pub_test_utils/multicast.rs b/nexus/db-queries/src/db/pub_test_utils/multicast.rs index dbbbcd638e0..56934b90324 100644 --- a/nexus/db-queries/src/db/pub_test_utils/multicast.rs +++ b/nexus/db-queries/src/db/pub_test_utils/multicast.rs @@ -8,7 +8,6 @@ use std::net::Ipv4Addr; use uuid::Uuid; -use nexus_db_model::MulticastGroupState; use nexus_db_model::{ IncompleteVpc, IpPool, IpPoolReservationType, IpPoolResource, IpPoolResourceType, IpVersion, @@ -16,6 +15,7 @@ use nexus_db_model::{ use nexus_types::external_api::params; use nexus_types::external_api::shared::{IpRange, Ipv4Range}; use nexus_types::identity::Resource; +use nexus_types::multicast::MulticastGroupCreate; use omicron_common::api::external::{IdentityMetadataCreateParams, LookupType}; use omicron_uuid_kinds::{GenericUuid, MulticastGroupUuid, SledUuid}; @@ -187,14 +187,13 @@ pub async fn create_test_group_with_state( multicast_ip: &str, make_active: bool, ) -> nexus_db_model::ExternalMulticastGroup { - let params = params::MulticastGroupCreate { + let params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: group_name.parse().unwrap(), description: format!("Test group: {}", group_name), }, multicast_ip: Some(multicast_ip.parse().unwrap()), source_ips: None, - pool: None, mvlan: None, }; @@ -205,10 +204,9 @@ pub async fn create_test_group_with_state( if make_active { datastore - 
.multicast_group_set_state( + .multicast_group_set_active( opctx, MulticastGroupUuid::from_untyped_uuid(group.id()), - MulticastGroupState::Active, ) .await .expect("Should transition group to 'Active' state"); diff --git a/nexus/db-queries/src/db/queries/external_multicast_group.rs b/nexus/db-queries/src/db/queries/external_multicast_group.rs index 2324e3bc4b1..833c13c94b7 100644 --- a/nexus/db-queries/src/db/queries/external_multicast_group.rs +++ b/nexus/db-queries/src/db/queries/external_multicast_group.rs @@ -217,6 +217,39 @@ impl NextExternalMulticastGroup { out.push_sql(" AND time_deleted IS NULL"); Ok(()) } + + /// Push a subquery to update the `ip_pool_range` table's rcgen, if we've + /// successfully allocated a multicast group from that range. + /// + /// This prevents race conditions where a range is deleted while a + /// multicast group allocation is in progress. The range deletion checks + /// the rcgen value, and this increment ensures it changes when an + /// allocation occurs. 
+ fn push_update_ip_pool_range_subquery<'a>( + &'a self, + mut out: AstPass<'_, 'a, Pg>, + ) -> QueryResult<()> { + use schema::ip_pool_range::dsl; + out.push_sql("UPDATE "); + schema::ip_pool_range::table.walk_ast(out.reborrow())?; + out.push_sql(" SET "); + out.push_identifier(dsl::time_modified::NAME)?; + out.push_sql(" = "); + out.push_bind_param::>( + &self.now, + )?; + out.push_sql(", "); + out.push_identifier(dsl::rcgen::NAME)?; + out.push_sql(" = "); + out.push_identifier(dsl::rcgen::NAME)?; + out.push_sql(" + 1 WHERE "); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" = (SELECT ip_pool_range_id FROM next_external_multicast_group) AND "); + out.push_identifier(dsl::time_deleted::NAME)?; + out.push_sql(" IS NULL RETURNING "); + out.push_identifier(dsl::id::NAME)?; + Ok(()) + } } impl QueryFragment for NextExternalMulticastGroup { @@ -246,6 +279,11 @@ impl QueryFragment for NextExternalMulticastGroup { WHERE NOT EXISTS (SELECT 1 FROM previously_allocated_group) RETURNING id, name, description, time_created, time_modified, time_deleted, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, mvlan, underlay_group_id, tag, state, version_added, version_removed", ); + out.push_sql("), "); + + // Update the IP pool range's rcgen to prevent race conditions with range deletion + out.push_sql("updated_pool_range AS ("); + self.push_update_ip_pool_range_subquery(out.reborrow())?; out.push_sql(") "); // Return either the newly inserted or previously allocated group diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index f16753bbc88..5b9a57fd22a 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -137,19 +137,19 @@ resource: authz::IpPoolList resource: authz::MulticastGroupList USER Q R LC RP M MP CC D - fleet-admin ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - fleet-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - fleet-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-admin ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - 
silo1-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-limited-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-proj1-admin ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-proj1-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-proj1-limited-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ - silo1-proj1-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ + fleet-admin ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-limited-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-limited-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! - scim ✘ ✘ ✔ ✘ ✘ ✘ ✔ ✘ + scim ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ resource: authz::QuiesceState @@ -528,19 +528,19 @@ resource: Disk "silo1-proj1-disk1" resource: MulticastGroup "silo1-proj1-multicast-group1" USER Q R LC RP M MP CC D - fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ - fleet-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - fleet-viewer ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - silo1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + fleet-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! 
- scim ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + scim ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ resource: AffinityGroup "silo1-proj1-affinity-group1" @@ -800,19 +800,19 @@ resource: Disk "silo1-proj2-disk1" resource: MulticastGroup "silo1-proj2-multicast-group1" USER Q R LC RP M MP CC D - fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ - fleet-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - fleet-viewer ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - silo1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + fleet-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! 
- scim ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + scim ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ resource: AffinityGroup "silo1-proj2-affinity-group1" @@ -1361,19 +1361,19 @@ resource: Disk "silo2-proj1-disk1" resource: MulticastGroup "silo2-proj1-multicast-group1" USER Q R LC RP M MP CC D - fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ - fleet-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - fleet-viewer ✘ ✔ ✔ ✔ ✔ ✔ ✘ ✔ - silo1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-limited-collaborator ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ - silo1-proj1-viewer ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + fleet-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-limited-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! - scim ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + scim ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ resource: AffinityGroup "silo2-proj1-affinity-group1" diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 8be6bb768c2..b6cc91c53db 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -2794,6 +2794,7 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable, external_group_id -> Uuid, + multicast_ip -> Inet, parent_id -> Uuid, sled_id -> Nullable, state -> crate::enums::MulticastGroupMemberStateEnum, @@ -2815,6 +2816,9 @@ table! { } } +// Allow multicast tables to appear together for NOT EXISTS subqueries +allow_tables_to_appear_in_same_query!(multicast_group, multicast_group_member); + allow_tables_to_appear_in_same_query!(user_data_export, snapshot, image); table! 
{ diff --git a/nexus/external-api/Cargo.toml b/nexus/external-api/Cargo.toml index 6c4032534c8..f05fdb6c22f 100644 --- a/nexus/external-api/Cargo.toml +++ b/nexus/external-api/Cargo.toml @@ -9,6 +9,7 @@ workspace = true [dependencies] anyhow.workspace = true +base64.workspace = true chrono.workspace = true dropshot-api-manager-types.workspace = true dropshot.workspace = true diff --git a/nexus/external-api/output/nexus_tags.txt b/nexus/external-api/output/nexus_tags.txt index 929fb6ce25e..022da349565 100644 --- a/nexus/external-api/output/nexus_tags.txt +++ b/nexus/external-api/output/nexus_tags.txt @@ -52,14 +52,10 @@ instance_affinity_group_list GET /v1/instances/{instance}/affin instance_multicast_group_join PUT /v1/instances/{instance}/multicast-groups/{multicast_group} instance_multicast_group_leave DELETE /v1/instances/{instance}/multicast-groups/{multicast_group} instance_multicast_group_list GET /v1/instances/{instance}/multicast-groups -lookup_multicast_group_by_ip GET /v1/system/multicast-groups/by-ip/{address} -multicast_group_create POST /v1/multicast-groups -multicast_group_delete DELETE /v1/multicast-groups/{multicast_group} multicast_group_list GET /v1/multicast-groups multicast_group_member_add POST /v1/multicast-groups/{multicast_group}/members multicast_group_member_list GET /v1/multicast-groups/{multicast_group}/members multicast_group_member_remove DELETE /v1/multicast-groups/{multicast_group}/members/{instance} -multicast_group_update PUT /v1/multicast-groups/{multicast_group} multicast_group_view GET /v1/multicast-groups/{multicast_group} probe_create POST /experimental/v1/probes probe_delete DELETE /experimental/v1/probes/{probe} diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs index 39940991ee9..436c3c7dbd8 100644 --- a/nexus/external-api/src/lib.rs +++ b/nexus/external-api/src/lib.rs @@ -35,6 +35,7 @@ use openapiv3::OpenAPI; /// Copies of data types that changed between versions mod v2025112000; +pub mod 
v2025120300; api_versions!([ // API versions are in the format YYYYMMDDNN.0.0, defined below as @@ -64,6 +65,7 @@ api_versions!([ // | date-based version should be at the top of the list. // v // (next_yyyymmddnn, IDENT), + (2025120500, MULTICAST_IMPLICIT_LIFECYCLE_UPDATES), (2025120300, LOCAL_STORAGE), (2025112000, INITIAL), ]); @@ -1051,8 +1053,8 @@ pub trait NexusExternalApi { /// Link IP pool to silo /// /// Users in linked silos can allocate external IPs from this pool for their - /// instances. A silo can have at most one default pool. IPs are allocated from - /// the default pool when users ask for one without specifying a pool. + /// instances. A silo can have at most one default pool. IPs are allocated + /// from the default pool when users ask for one without specifying a pool. #[endpoint { method = POST, path = "/v1/system/ip-pools/{pool}/silos", @@ -1281,12 +1283,41 @@ pub trait NexusExternalApi { ) -> Result, HttpError>; // Multicast Groups + // + // API versioning note: Versioned endpoints can use default trait + // implementations when path types are identical between versions. + // `TypedBody` has `.map()` for input conversion, but `Path` does not. + // Endpoints with different path types (e.g., `v2025120300::MulticastGroupPath` + // vs `params::MulticastGroupPath`) must have implementations in + // `http_entrypoints.rs`. + // + // TODO: Consider adding `.map()` to dropshot's `Path` (similar to + // `TypedBody`) to enable default implementations for versioned endpoints + // with different path types. + + /// List multicast groups. 
+ #[endpoint { + method = GET, + path = "/v1/multicast-groups", + tags = ["experimental"], + operation_id = "multicast_group_list", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_multicast_group_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> + { + // Types are identical, delegate directly + Self::multicast_group_list(rqctx, query_params).await + } - /// List all multicast groups. + /// List multicast groups. #[endpoint { method = GET, path = "/v1/multicast-groups", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn multicast_group_list( rqctx: RequestContext, @@ -1295,24 +1326,43 @@ pub trait NexusExternalApi { /// Create a multicast group. /// - /// Multicast groups are fleet-scoped resources that can be joined by - /// instances across projects and silos. A single multicast IP serves - /// all group members regardless of project or silo boundaries. + /// Deprecated: Groups are created implicitly when adding members in newer + /// API versions. #[endpoint { method = POST, path = "/v1/multicast-groups", tags = ["experimental"], + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, }] - async fn multicast_group_create( + async fn v2025120300_multicast_group_create( rqctx: RequestContext, - group_params: TypedBody, + new_group: TypedBody, ) -> Result, HttpError>; /// Fetch a multicast group. + /// + /// The group can be specified by name or UUID. #[endpoint { method = GET, path = "/v1/multicast-groups/{multicast_group}", tags = ["experimental"], + operation_id = "multicast_group_view", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_multicast_group_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Fetch a multicast group. + /// + /// The group can be specified by name, UUID, or multicast IP address. + /// (e.g., "224.1.2.3" or "ff38::1"). 
+ #[endpoint { + method = GET, + path = "/v1/multicast-groups/{multicast_group}", + tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn multicast_group_view( rqctx: RequestContext, @@ -1320,44 +1370,61 @@ pub trait NexusExternalApi { ) -> Result, HttpError>; /// Update a multicast group. + /// + /// Deprecated: groups are managed implicitly through member operations. #[endpoint { method = PUT, path = "/v1/multicast-groups/{multicast_group}", tags = ["experimental"], + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, }] - async fn multicast_group_update( + async fn v2025120300_multicast_group_update( rqctx: RequestContext, - path_params: Path, - updated_group: TypedBody, + path_params: Path, + update_params: TypedBody, ) -> Result, HttpError>; /// Delete a multicast group. + /// + /// Deprecated: groups are deleted automatically when the last member leaves. #[endpoint { method = DELETE, path = "/v1/multicast-groups/{multicast_group}", tags = ["experimental"], + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, }] - async fn multicast_group_delete( + async fn v2025120300_multicast_group_delete( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result; - /// Look up multicast group by IP address. + /// List members of a multicast group. + /// + /// The group can be specified by name or UUID. #[endpoint { method = GET, - path = "/v1/system/multicast-groups/by-ip/{address}", + path = "/v1/multicast-groups/{multicast_group}/members", tags = ["experimental"], + operation_id = "multicast_group_member_list", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, }] - async fn lookup_multicast_group_by_ip( + async fn v2025120300_multicast_group_member_list( rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; + path_params: Path, + query_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + >; /// List members of a multicast group. 
+ /// + /// The group can be specified by name, UUID, or multicast IP address. #[endpoint { method = GET, path = "/v1/multicast-groups/{multicast_group}/members", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn multicast_group_member_list( rqctx: RequestContext, @@ -1365,17 +1432,58 @@ pub trait NexusExternalApi { query_params: Query, ) -> Result>, HttpError>; + /// Add instance to a multicast group. + /// + /// The group can be specified by name or UUID. + #[endpoint { + method = POST, + path = "/v1/multicast-groups/{multicast_group}/members", + tags = ["experimental"], + operation_id = "multicast_group_member_add", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_multicast_group_member_add( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + member_params: TypedBody, + ) -> Result, HttpError>; + /// Add instance to a multicast group. /// /// Functionally equivalent to updating the instance's `multicast_groups` field. /// Both approaches modify the same underlying membership and trigger the same /// reconciliation logic. /// + /// Authorization: requires Modify on the instance specified in the request + /// body (checked first) and Read on the multicast group (users can only attach + /// instances they are authorized to modify). + /// + /// Group Identification: Groups can be referenced by name, IP address, + /// or UUID. All three are fleet-wide unique identifiers: + /// - By name: If group doesn't exist, it's implicitly created with an + /// auto-allocated IP from a multicast pool linked to the caller's silo. + /// Pool selection prefers the default pool; if none, selects alphabetically. + /// - By IP: If group doesn't exist, it's implicitly created using that + /// IP. The pool is determined by which pool contains the IP. + /// - By UUID: Group must already exist. 
+ /// + /// Source IP filtering (SSM): + /// - Duplicate IPs in the request are automatically deduplicated. + /// - Maximum of 64 source IPs allowed (per RFC 3376, IGMPv3). + /// - Creating a new SSM group: `source_ips` is required. SSM addresses + /// (232.x.x.x for IPv4, FF3x:: for IPv6) require source filtering. + /// - Joining an existing group: If `source_ips` is omitted, the instance + /// inherits the group's existing sources. If specified, they must exactly + /// match the group's sources or the request fails. + /// - Providing `source_ips` to an ASM group (or vice versa) will fail. + /// /// Specify instance by name (requires `?project=`) or UUID. #[endpoint { method = POST, path = "/v1/multicast-groups/{multicast_group}/members", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn multicast_group_member_add( rqctx: RequestContext, @@ -1386,15 +1494,38 @@ pub trait NexusExternalApi { /// Remove instance from a multicast group. /// + /// The group can be specified by name or UUID. + #[endpoint { + method = DELETE, + path = "/v1/multicast-groups/{multicast_group}/members/{instance}", + tags = ["experimental"], + operation_id = "multicast_group_member_remove", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_multicast_group_member_remove( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Remove instance from a multicast group. + /// + /// The group can be specified by name, UUID, or multicast IP address. + /// All three are fleet-wide unique identifiers. + /// /// Functionally equivalent to removing the group from the instance's /// `multicast_groups` field. Both approaches modify the same underlying /// membership and trigger reconciliation. /// + /// Authorization: requires Modify on the instance (checked first) and Read + /// on the multicast group. + /// /// Specify instance by name (requires `?project=`) or UUID. 
#[endpoint { method = DELETE, path = "/v1/multicast-groups/{multicast_group}/members/{instance}", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn multicast_group_member_remove( rqctx: RequestContext, @@ -1402,6 +1533,20 @@ pub trait NexusExternalApi { query_params: Query, ) -> Result; + /// Look up multicast group by IP address. + /// + /// Deprecated: use the main view endpoint which accepts IP addresses directly. + #[endpoint { + method = GET, + path = "/v1/system/multicast-groups/by-ip/{address}", + tags = ["experimental"], + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_lookup_multicast_group_by_ip( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + // Disks /// List disks @@ -1619,7 +1764,23 @@ pub trait NexusExternalApi { method = POST, path = "/v1/instances", tags = ["instances"], - versions = VERSION_LOCAL_STORAGE.., + versions = VERSION_LOCAL_STORAGE..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_instance_create( + rqctx: RequestContext, + query_params: Query, + new_instance: TypedBody, + ) -> Result, HttpError> { + Self::instance_create(rqctx, query_params, new_instance.map(Into::into)) + .await + } + + /// Create instance + #[endpoint { + method = POST, + path = "/v1/instances", + tags = ["instances"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn instance_create( rqctx: RequestContext, @@ -1656,6 +1817,29 @@ pub trait NexusExternalApi { method = PUT, path = "/v1/instances/{instance}", tags = ["instances"], + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_instance_update( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + instance_config: TypedBody, + ) -> Result, HttpError> { + Self::instance_update( + rqctx, + query_params, + path_params, + instance_config.map(Into::into), + ) + .await + } + + /// Update instance + #[endpoint 
{ + method = PUT, + path = "/v1/instances/{instance}", + tags = ["instances"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn instance_update( rqctx: RequestContext, @@ -2808,11 +2992,46 @@ pub trait NexusExternalApi { // Instance Multicast Groups - /// List multicast groups for instance + /// List multicast groups for an instance. + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/multicast-groups", + tags = ["experimental"], + operation_id = "instance_multicast_group_list", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_instance_multicast_group_list( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result< + HttpResponseOk>, + HttpError, + > { + match Self::instance_multicast_group_list( + rqctx, + query_params, + path_params, + ) + .await + { + Ok(page) => { + let new_page = ResultsPage { + next_page: page.0.next_page, + items: page.0.items.into_iter().map(Into::into).collect(), + }; + Ok(HttpResponseOk(new_page)) + } + Err(e) => Err(e), + } + } + + /// List multicast groups for an instance. #[endpoint { method = GET, path = "/v1/instances/{instance}/multicast-groups", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn instance_multicast_group_list( rqctx: RequestContext, @@ -2823,31 +3042,96 @@ pub trait NexusExternalApi { HttpError, >; - /// Join multicast group. + /// Join a multicast group. + /// + /// The group can be specified by name or UUID. This endpoint does not + /// accept a request body. 
+ #[endpoint { + method = PUT, + path = "/v1/instances/{instance}/multicast-groups/{multicast_group}", + tags = ["experimental"], + operation_id = "instance_multicast_group_join", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_instance_multicast_group_join( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Join a multicast group. /// /// This is functionally equivalent to adding the instance via the group's /// member management endpoint or updating the instance's `multicast_groups` /// field. All approaches modify the same membership and trigger reconciliation. + /// + /// Authorization: requires Modify on the instance identified in the URL path + /// (checked first) and Read on the multicast group. Checking instance permission + /// first prevents creating orphaned groups when the instance check fails. + /// + /// Group Identification: Groups can be referenced by name, IP address, + /// or UUID. All three are fleet-wide unique identifiers: + /// - By name: If group doesn't exist, it's implicitly created with an + /// auto-allocated IP from a multicast pool linked to the caller's silo. + /// Pool selection prefers the default pool; if none, selects alphabetically. + /// - By IP: If group doesn't exist, it's implicitly created using that + /// IP. The pool is determined by which pool contains the IP. + /// - By UUID: Group must already exist. + /// + /// Source IP filtering (SSM): + /// - Duplicate IPs in the request are automatically deduplicated. + /// - Maximum of 64 source IPs allowed (per RFC 3376, IGMPv3). + /// - Creating a new SSM group: `source_ips` is required. SSM addresses + /// (232.x.x.x for IPv4, FF3x:: for IPv6) require source filtering. + /// - Joining an existing group: If `source_ips` is omitted, the instance + /// inherits the group's existing sources. If specified, they must exactly + /// match the group's sources or the request fails. 
+ /// - Providing `source_ips` to an ASM group (or vice versa) will fail. #[endpoint { method = PUT, path = "/v1/instances/{instance}/multicast-groups/{multicast_group}", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn instance_multicast_group_join( rqctx: RequestContext, path_params: Path, query_params: Query, + body_params: TypedBody, ) -> Result, HttpError>; - /// Leave multicast group. + /// Leave a multicast group. + /// + /// The group can be specified by name or UUID. + #[endpoint { + method = DELETE, + path = "/v1/instances/{instance}/multicast-groups/{multicast_group}", + tags = ["experimental"], + operation_id = "instance_multicast_group_leave", + versions = ..VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES, + }] + async fn v2025120300_instance_multicast_group_leave( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Leave a multicast group. + /// + /// The group can be specified by name, UUID, or multicast IP address. + /// All three are fleet-wide unique identifiers. /// /// This is functionally equivalent to removing the instance via the group's /// member management endpoint or updating the instance's `multicast_groups` /// field. All approaches modify the same membership and trigger reconciliation. + /// + /// Authorization: requires Modify on the instance (checked first) and Read + /// on the multicast group. 
#[endpoint { method = DELETE, path = "/v1/instances/{instance}/multicast-groups/{multicast_group}", tags = ["experimental"], + versions = VERSION_MULTICAST_IMPLICIT_LIFECYCLE_UPDATES.., }] async fn instance_multicast_group_leave( rqctx: RequestContext, diff --git a/nexus/external-api/src/v2025112000.rs b/nexus/external-api/src/v2025112000.rs index 88b6ea7b23f..295ca92de57 100644 --- a/nexus/external-api/src/v2025112000.rs +++ b/nexus/external-api/src/v2025112000.rs @@ -309,7 +309,11 @@ impl From for params::InstanceCreate { user_data: old.user_data, network_interfaces: old.network_interfaces, external_ips: old.external_ips, - multicast_groups: old.multicast_groups, + multicast_groups: old + .multicast_groups + .into_iter() + .map(Into::into) + .collect(), disks: old.disks.into_iter().map(Into::into).collect(), boot_disk: old.boot_disk.map(Into::into), ssh_public_keys: old.ssh_public_keys, diff --git a/nexus/external-api/src/v2025120300.rs b/nexus/external-api/src/v2025120300.rs new file mode 100644 index 00000000000..1e5c19f8fc2 --- /dev/null +++ b/nexus/external-api/src/v2025120300.rs @@ -0,0 +1,361 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Nexus external API types (version 2025120300) +//! +//! Version 2025120300 types (before [`MulticastGroupIdentifier`] was introduced +//! and before implicit group lifecycle). +//! +//! Key differences from newer API versions: +//! - Uses [`NameOrId`] for multicast group references (not [`MulticastGroupIdentifier`]). +//! Newer versions accept name, UUID, or multicast IP address, while this version +//! only accepts name or UUID. +//! - Has explicit [`MulticastGroupCreate`] and [`MulticastGroupUpdate`] types +//! (newer versions create/delete groups implicitly via member operations). +//! - [`MulticastGroupMemberAdd`] doesn't have `source_ips` field. +//! 
+//! [`MulticastGroupIdentifier`]: params::MulticastGroupIdentifier +//! [`NameOrId`]: omicron_common::api::external::NameOrId +//! [`MulticastGroupCreate`]: self::MulticastGroupCreate +//! [`MulticastGroupUpdate`]: self::MulticastGroupUpdate +//! [`MulticastGroupMemberAdd`]: self::MulticastGroupMemberAdd + +use std::net::IpAddr; + +use chrono::{DateTime, Utc}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use nexus_types::external_api::params::UserData; +use nexus_types::external_api::{params, views}; +use nexus_types::multicast::MulticastGroupCreate as InternalMulticastGroupCreate; +use omicron_common::api::external::{ + ByteCount, Hostname, IdentityMetadataCreateParams, + InstanceAutoRestartPolicy, InstanceCpuCount, InstanceCpuPlatform, Name, + NameOrId, Nullable, +}; +use omicron_common::vlan::VlanID; +use params::{ + ExternalIpCreate, InstanceDiskAttachment, + InstanceNetworkInterfaceAttachment, +}; + +/// Path parameter for multicast group operations. +/// +/// Uses `NameOrId` instead of `MulticastGroupIdentifier`. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupPath { + /// Name or ID of the multicast group + pub multicast_group: NameOrId, +} + +impl From for params::MulticastGroupPath { + fn from(old: MulticastGroupPath) -> Self { + Self { multicast_group: old.multicast_group.into() } + } +} + +/// Path parameters for multicast group member operations. +/// +/// Uses `NameOrId` instead of `MulticastGroupIdentifier` for the group. 
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupMemberPath { + /// Name or ID of the multicast group + pub multicast_group: NameOrId, + /// Name or ID of the instance + pub instance: NameOrId, +} + +impl From for params::MulticastGroupMemberPath { + fn from(old: MulticastGroupMemberPath) -> Self { + Self { + multicast_group: old.multicast_group.into(), + instance: old.instance, + } + } +} + +/// Path parameters for instance multicast group operations. +/// +/// Uses `NameOrId` instead of `MulticastGroupIdentifier` for the group. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct InstanceMulticastGroupPath { + /// Name or ID of the instance + pub instance: NameOrId, + /// Name or ID of the multicast group + pub multicast_group: NameOrId, +} + +impl From for params::InstanceMulticastGroupPath { + fn from(old: InstanceMulticastGroupPath) -> Self { + Self { + instance: old.instance, + multicast_group: old.multicast_group.into(), + } + } +} + +/// Create-time parameters for a multicast group. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupCreate { + pub name: Name, + pub description: String, + /// The multicast IP address to allocate. If None, one will be allocated + /// from the default pool. + #[serde(default)] + pub multicast_ip: Option, + /// Source IP addresses for Source-Specific Multicast (SSM). + /// + /// None uses default behavior (Any-Source Multicast). + /// Empty list explicitly allows any source (Any-Source Multicast). + /// Non-empty list restricts to specific sources (SSM). + #[serde(default)] + pub source_ips: Option>, + /// Name or ID of the IP pool to allocate from. If None, uses the default + /// multicast pool. + #[serde(default)] + pub pool: Option, + /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. + /// Tags packets leaving the rack to traverse VLAN-segmented upstream networks. 
+ /// + /// Valid range: 2-4094 (VLAN IDs 0-1 are reserved by IEEE 802.1Q standard). + #[serde(default)] + pub mvlan: Option, +} + +/// Update-time parameters for a multicast group. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupUpdate { + /// New name for the multicast group + #[serde(default)] + pub name: Option, + /// New description for the multicast group + #[serde(default)] + pub description: Option, + /// Update source IPs for SSM + #[serde(default, skip_serializing_if = "Option::is_none")] + pub source_ips: Option>, + /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. + /// Set to null to clear the MVLAN. Valid range: 2-4094 when provided. + /// Omit the field to leave mvlan unchanged. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mvlan: Option>, +} + +/// Parameters for adding an instance to a multicast group. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupMemberAdd { + /// Name or ID of the instance to add to the multicast group + pub instance: NameOrId, +} + +impl From for params::MulticastGroupMemberAdd { + fn from(old: MulticastGroupMemberAdd) -> Self { + Self { instance: old.instance, source_ips: None } + } +} + +/// Path parameter for looking up a multicast group by IP address. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupByIpPath { + /// IP address of the multicast group + pub address: IpAddr, +} + +impl From for InternalMulticastGroupCreate { + fn from(old: MulticastGroupCreate) -> Self { + Self { + identity: IdentityMetadataCreateParams { + name: old.name, + description: old.description, + }, + multicast_ip: old.multicast_ip, + source_ips: old.source_ips, + mvlan: old.mvlan, + } + } +} + +/// View of a Multicast Group Member. +/// +/// This version doesn't have the `multicast_ip` field which was added later. 
+#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] +pub struct MulticastGroupMember { + /// unique, immutable, system-controlled identifier for each resource + pub id: Uuid, + /// unique, mutable, user-controlled identifier for each resource + pub name: Name, + /// human-readable free-form text about a resource + pub description: String, + /// timestamp when this resource was created + pub time_created: DateTime, + /// timestamp when this resource was last modified + pub time_modified: DateTime, + /// The ID of the multicast group this member belongs to. + pub multicast_group_id: Uuid, + /// The ID of the instance that is a member of this group. + pub instance_id: Uuid, + /// Current state of the multicast group membership. + pub state: String, +} + +impl From for MulticastGroupMember { + fn from(v: views::MulticastGroupMember) -> Self { + Self { + id: v.identity.id, + name: v.identity.name, + description: v.identity.description, + time_created: v.identity.time_created, + time_modified: v.identity.time_modified, + multicast_group_id: v.multicast_group_id, + instance_id: v.instance_id, + state: v.state, + } + } +} + +/// Create-time parameters for an `Instance`. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct InstanceCreate { + #[serde(flatten)] + pub identity: IdentityMetadataCreateParams, + /// The number of vCPUs to be allocated to the instance + pub ncpus: InstanceCpuCount, + /// The amount of RAM (in bytes) to be allocated to the instance + pub memory: ByteCount, + /// The hostname to be assigned to the instance + pub hostname: Hostname, + + /// User data for instance initialization systems (such as cloud-init). + /// Must be a Base64-encoded string, as specified in RFC 4648 § 4 (+ and / + /// characters with padding). Maximum 32 KiB unencoded data. + #[serde(default, with = "UserData")] + pub user_data: Vec, + + /// The network interfaces to be created for this instance. 
+ #[serde(default)] + pub network_interfaces: InstanceNetworkInterfaceAttachment, + + /// The external IP addresses provided to this instance. + #[serde(default)] + pub external_ips: Vec, + + /// Multicast groups this instance should be joined to upon creation. + /// + /// Provide a list of multicast group names or UUIDs. Newer API versions + /// also accept multicast IP addresses. + #[serde(default)] + pub multicast_groups: Vec, + + /// A list of disks to be attached to the instance. + #[serde(default)] + pub disks: Vec, + + /// The disk the instance is configured to boot from. + #[serde(default)] + pub boot_disk: Option, + + /// An allowlist of SSH public keys to be transferred to the instance via + /// cloud-init during instance creation. + pub ssh_public_keys: Option>, + + /// Should this instance be started upon creation; true by default. + #[serde(default = "bool_true")] + pub start: bool, + + /// The auto-restart policy for this instance. + #[serde(default)] + pub auto_restart_policy: Option, + + /// Anti-Affinity groups which this instance should be added. + #[serde(default)] + pub anti_affinity_groups: Vec, + + /// The CPU platform to be used for this instance. 
+ #[serde(default)] + pub cpu_platform: Option, +} + +#[inline] +fn bool_true() -> bool { + true +} + +impl From for params::InstanceCreate { + fn from(old: InstanceCreate) -> Self { + // Convert NameOrId to MulticastGroupIdentifier + let multicast_groups = + old.multicast_groups.into_iter().map(|g| g.into()).collect(); + + Self { + identity: old.identity, + ncpus: old.ncpus, + memory: old.memory, + hostname: old.hostname, + user_data: old.user_data, + network_interfaces: old.network_interfaces, + external_ips: old.external_ips, + multicast_groups, + disks: old.disks, + boot_disk: old.boot_disk, + ssh_public_keys: old.ssh_public_keys, + start: old.start, + auto_restart_policy: old.auto_restart_policy, + anti_affinity_groups: old.anti_affinity_groups, + cpu_platform: old.cpu_platform, + } + } +} + +/// Parameters of an `Instance` that can be reconfigured after creation. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct InstanceUpdate { + /// The number of vCPUs to be allocated to the instance + pub ncpus: InstanceCpuCount, + + /// The amount of RAM (in bytes) to be allocated to the instance + pub memory: ByteCount, + + /// The disk the instance is configured to boot from. + pub boot_disk: Nullable, + + /// The auto-restart policy for this instance. + pub auto_restart_policy: Nullable, + + /// The CPU platform to be used for this instance. + pub cpu_platform: Nullable, + + /// Multicast groups this instance should join. + /// + /// When specified, this replaces the instance's current multicast group + /// membership with the new set of groups. The instance will leave any + /// groups not listed here and join any new groups that are specified. + /// + /// If not provided (None), the instance's multicast group membership + /// will not be changed. + /// + /// Accepts group names or UUIDs. Newer API versions also accept multicast + /// IP addresses. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub multicast_groups: Option>, +} + +impl From for params::InstanceUpdate { + fn from(old: InstanceUpdate) -> Self { + // Convert Option> to Option> + let multicast_groups = old + .multicast_groups + .map(|groups| groups.into_iter().map(|g| g.into()).collect()); + + Self { + ncpus: old.ncpus, + memory: old.memory, + boot_disk: old.boot_disk, + auto_restart_policy: old.auto_restart_policy, + cpu_platform: old.cpu_platform, + multicast_groups, + } + } +} diff --git a/nexus/src/app/background/tasks/multicast/groups.rs b/nexus/src/app/background/tasks/multicast/groups.rs index 3aa9330e057..1b094bff348 100644 --- a/nexus/src/app/background/tasks/multicast/groups.rs +++ b/nexus/src/app/background/tasks/multicast/groups.rs @@ -18,7 +18,7 @@ //! //! ## Operations Handled //! - **"Creating" state**: Initiate DPD "ensure" to apply configuration -//! - **"Active" state**: Detect DPD drift and launch UPDATE saga when DB state differs +//! - **"Active" state**: Detect DPD drift and sync directly //! - **"Deleting" state**: Switch cleanup and database removal //! - **Extensible processing**: Support for different group types //! @@ -47,8 +47,8 @@ //! | Condition | DPD State | Action | Next State | //! |-----------|-----------|---------|------------| //! | 1 | Matches DB | No action | "Active" (NoChange) | -//! | 2 | Differs from DB | Launch UPDATE saga to fix drift | "Active" (StateChanged) | -//! | 3 | Missing/error | Launch UPDATE saga to fix drift | "Active" (StateChanged) | +//! | 2 | Differs from DB | Direct dataplane call to fix drift | "Active" (StateChanged) | +//! | 3 | Missing/error | Direct dataplane call to fix drift | "Active" (StateChanged) | //! //! ### DELETING State Transitions //! 
| Condition | DPD cleanup (external+underlay) | DB cleanup (row) | Action | Next State |
@@ -75,7 +75,7 @@
 
 use anyhow::Context;
 use futures::stream::{self, StreamExt};
-use slog::{debug, error, trace, warn};
+use slog::{debug, error, info, trace, warn};
 
 use nexus_db_model::{MulticastGroup, MulticastGroupState};
 use nexus_db_queries::context::OpContext;
@@ -84,16 +84,23 @@ use omicron_common::api::external::DataPageParams;
 use omicron_uuid_kinds::{GenericUuid, MulticastGroupUuid};
 
 use super::{MulticastGroupReconciler, StateTransition};
-use crate::app::multicast::dataplane::MulticastDataplaneClient;
+use crate::app::multicast::dataplane::{
+    GroupUpdateParams, MulticastDataplaneClient,
+};
 use crate::app::saga::create_saga_dag;
 use crate::app::sagas;
 
-/// Check if DPD tag matches database name.
-fn dpd_state_matches_name(
+/// Check if DPD tag matches database UUID.
+///
+/// Tags are UUID-based to prevent collision when group names are reused.
+fn dpd_state_matches_tag(
     dpd_group: &dpd_client::types::MulticastGroupExternalResponse,
     db_group: &MulticastGroup,
 ) -> bool {
-    dpd_group.tag.as_ref().map_or(false, |tag| tag == db_group.name().as_str())
+    dpd_group
+        .tag
+        .as_ref()
+        .map_or(false, |tag| tag == &db_group.id().to_string())
 }
 
 /// Check if DPD sources match database sources.
@@ -484,8 +491,7 @@ impl MulticastGroupReconciler {
     /// External group handler for groups in "Active" state.
     ///
    /// Checks if the group's DPD state matches the database state. If not,
-    /// launches the UPDATE saga to sync. This handles updates triggered by
-    /// the UPDATE API endpoint and self-corrects any DPD drift.
+    /// we make a dataplane call to sync. This self-corrects any DPD drift.
async fn handle_active_external_group( &self, opctx: &OpContext, @@ -504,20 +510,20 @@ impl MulticastGroupReconciler { .await { Ok(Some(dpd_group)) => { - let name_matches = dpd_state_matches_name(&dpd_group, group); + let tag_matches = dpd_state_matches_tag(&dpd_group, group); let sources_match = dpd_state_matches_sources(&dpd_group, group); let mvlan_matches = dpd_state_matches_mvlan(&dpd_group, group); let needs_update = - !name_matches || !sources_match || !mvlan_matches; + !tag_matches || !sources_match || !mvlan_matches; if needs_update { debug!( opctx.log, "detected DPD state mismatch for active group"; "group_id" => %group.id(), - "name_matches" => name_matches, + "tag_matches" => tag_matches, "sources_match" => sources_match, "mvlan_matches" => mvlan_matches ); @@ -554,32 +560,46 @@ impl MulticastGroupReconciler { "multicast_ip" => %group.multicast_ip ); - let saga_params = sagas::multicast_group_dpd_update::Params { - serialized_authn: - nexus_db_queries::authn::saga::Serialized::for_opctx(opctx), - external_group_id: group.id(), - underlay_group_id, - }; - - let dag = create_saga_dag::< - sagas::multicast_group_dpd_update::SagaMulticastGroupDpdUpdate, - >(saga_params) - .context("failed to create multicast group update saga")?; - - let saga_id = self - .sagas - .saga_start(dag) + // Fetch underlay group for the update + let underlay_group = self + .datastore + .underlay_multicast_group_fetch(opctx, underlay_group_id) .await - .context("failed to start multicast group update saga")?; - - debug!( - opctx.log, - "DPD update saga initiated for active group"; - "external_group_id" => %group.id(), - "saga_id" => %saga_id, - ); - - Ok(StateTransition::StateChanged) + .context( + "failed to fetch underlay group for drift correction", + )?; + + // Direct dataplane call for drift correction + // If update fails, we leave existing state and retry on next RPW cycle. 
+ match dataplane_client + .update_groups(GroupUpdateParams { + external_group: group, + underlay_group: &underlay_group, + new_name: group.name().as_str(), + new_sources: &group.source_ips, + }) + .await + { + Ok(_) => { + info!( + opctx.log, + "drift correction completed for active group"; + "group_id" => %group.id(), + "multicast_ip" => %group.multicast_ip + ); + Ok(StateTransition::StateChanged) + } + Err(e) => { + warn!( + opctx.log, + "drift correction failed, will retry on next cycle"; + "group_id" => %group.id(), + "error" => %e + ); + // Return NoChange so RPW retries on next activation + Ok(StateTransition::NoChange) + } + } } else { Ok(StateTransition::NoChange) } @@ -623,7 +643,8 @@ impl MulticastGroupReconciler { "group" => ?group ); - // Generate underlay multicast IP using IPv6 admin-local scope (RFC 7346) + // Generate underlay multicast IP using our ff04::/64 prefix + // (part of the RFC 7346 admin-local scope ff04::/16) let underlay_ip = self .map_external_to_underlay_ip(group.multicast_ip.ip()) .context( @@ -723,6 +744,15 @@ impl MulticastGroupReconciler { .context("failed to delete underlay group from database")?; } + // Delete all membership records for this group + self.datastore + .multicast_group_members_delete_by_group( + opctx, + MulticastGroupUuid::from_untyped_uuid(group.id()), + ) + .await + .context("failed to delete group members from database")?; + // Delete of external group record self.datastore .multicast_group_delete( diff --git a/nexus/src/app/background/tasks/multicast/members.rs b/nexus/src/app/background/tasks/multicast/members.rs index cfa298a3f77..ce684954284 100644 --- a/nexus/src/app/background/tasks/multicast/members.rs +++ b/nexus/src/app/background/tasks/multicast/members.rs @@ -99,7 +99,7 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::sync::Arc; -use std::time::SystemTime; +use std::time::Instant; use anyhow::{Context, Result}; use futures::stream::{self, StreamExt}; @@ -264,6 +264,13 @@ impl 
MemberStateProcessor for InstanceMemberProcessor { } impl MulticastGroupReconciler { + /// Group states that require member reconciliation processing. + const RECONCILABLE_STATES: &'static [MulticastGroupState] = &[ + MulticastGroupState::Creating, + MulticastGroupState::Active, + MulticastGroupState::Deleting, + ]; + /// Process member state changes ("Joining"→"Joined"→"Left"). pub async fn reconcile_member_states( &self, @@ -401,6 +408,31 @@ impl MulticastGroupReconciler { instance_states: &InstanceStateMap, dataplane_client: &MulticastDataplaneClient, ) -> Result { + // Check if the parent group has been deleted or is being deleted. + // If so, delete the member so cleanup can proceed. + // + // This should be impossible under normal operation because: + // 1. Members can only be added to "Active" groups (member_attach CTE) + // 2. Groups only transition to "Deleting" when there are no active + // members (`mark_multicast_group_for_removal_if_no_members`) + // + // However, we provide a fallthrough case for robustness. + if group.time_deleted().is_some() + || group.state == MulticastGroupState::Deleting + { + warn!( + opctx.log, + "member found for deleted/deleting group (unexpected state)"; + "member_id" => %member.id, + "group_id" => %group.id(), + "group_state" => ?group.state, + "group_time_deleted" => ?group.time_deleted() + ); + return self + .delete_member_for_deleted_group(opctx, group, member) + .await; + } + // For now, all members are instance-based, but this is where we'd // dispatch to different processors for different member types let processor = InstanceMemberProcessor; @@ -445,6 +477,47 @@ impl MulticastGroupReconciler { } } + /// Delete a member when its parent group has been deleted or is being deleted. + /// Sets `time_deleted` and transitions to "Left" state for RPW cleanup. 
+ async fn delete_member_for_deleted_group( + &self, + opctx: &OpContext, + group: &MulticastGroup, + member: &MulticastGroupMember, + ) -> Result { + // Skip if member is already deleted + if member.time_deleted.is_some() { + debug!( + opctx.log, + "member already deleted, no action needed"; + "member_id" => %member.id, + "group_id" => %group.id() + ); + return Ok(StateTransition::NoChange); + } + + // Delete the member (sets `time_deleted`, `state`="Left", and clears `sled_id`) + self.datastore + .multicast_group_member_delete_by_id( + opctx, + member.id.into_untyped_uuid(), + ) + .await + .context("failed to delete member for deleted group")?; + + info!( + opctx.log, + "member deleted due to parent group deletion"; + "member_id" => %member.id, + "instance_id" => %member.parent_id, + "group_id" => %group.id(), + "group_state" => ?group.state, + "group_time_deleted" => ?group.time_deleted() + ); + + Ok(StateTransition::StateChanged) + } + /// Instance-specific handler for members in "Joining" state. /// /// Validates instance state and attempts to transition the member to "Joined" @@ -2067,6 +2140,76 @@ impl MulticastGroupReconciler { Ok(deleted_count) } + /// Check for and implicitly delete empty groups. + /// + /// With implicit deletion, all multicast groups are deleted when all members + /// are removed. This function checks "Active" groups for any that have no + /// active members and marks them for deletion. + /// + /// This handles the case where instance deletion causes members to be + /// soft-deleted (via `multicast_group_members_mark_for_removal`), and after + /// the member cleanup removes those records, the group becomes empty. + /// + /// The underlying datastore method uses an atomic NOT EXISTS guard to + /// prevent race conditions where a concurrent join could create a member + /// between the emptiness check and the mark-for-removal. 
+ pub async fn cleanup_empty_groups( + &self, + opctx: &OpContext, + ) -> Result { + trace!( + opctx.log, + "checking for empty multicast groups to implicitly delete" + ); + + // List all Active groups + let active_groups = self + .datastore + .multicast_groups_list_by_state( + opctx, + MulticastGroupState::Active, + &DataPageParams::max_page(), + ) + .await + .context("failed to list active groups")?; + + let mut groups_marked = 0; + + for group in active_groups { + // Atomically mark for deletion only if no members exist. + // This is race-safe: the NOT EXISTS guard in the datastore method + // ensures we don't delete a group that just gained a member. + let marked = self + .datastore + .mark_multicast_group_for_removal_if_no_members( + opctx, + MulticastGroupUuid::from_untyped_uuid(group.id()), + ) + .await + .context("failed to check/mark empty group for removal")?; + + if marked { + info!( + opctx.log, + "auto-deleting empty multicast group"; + "group_id" => %group.id(), + "group_name" => %group.name() + ); + groups_marked += 1; + } + } + + if groups_marked > 0 { + info!( + opctx.log, + "marked empty multicast groups for deletion"; + "groups_marked" => groups_marked + ); + } + + Ok(groups_marked) + } + /// Get all members for a group. 
async fn get_group_members( &self, @@ -2090,12 +2233,7 @@ impl MulticastGroupReconciler { ) -> Option> { let cache = self.sled_mapping_cache.read().await; let (cached_at, mappings) = &*cache; - - // If we can't determine elapsed time, consider cache expired - let elapsed = match cached_at.elapsed() { - Ok(duration) => duration, - Err(_) => return None, - }; + let elapsed = cached_at.elapsed(); if elapsed < self.sled_cache_ttl { mappings.get(&cache_key).cloned() @@ -2149,15 +2287,7 @@ impl MulticastGroupReconciler { let previous_map = { let cache = self.backplane_map_cache.read().await; if let Some((cached_at, ref map)) = *cache { - // If we can't determine elapsed time, consider cache expired - let elapsed = match cached_at.elapsed() { - Ok(duration) => duration, - Err(_) => { - // If errored, we consider cache expired and return - // previous map for comparison - return Ok(map.clone()); - } - }; + let elapsed = cached_at.elapsed(); if elapsed < self.backplane_cache_ttl { trace!( @@ -2201,7 +2331,7 @@ impl MulticastGroupReconciler { // Update cache let mut cache = self.backplane_map_cache.write().await; - *cache = Some((SystemTime::now(), backplane_map.clone())); + *cache = Some((Instant::now(), backplane_map.clone())); Ok(backplane_map) } @@ -2451,7 +2581,7 @@ impl MulticastGroupReconciler { // Update cache let sled_count = mappings.len(); let mut cache = self.sled_mapping_cache.write().await; - *cache = (SystemTime::now(), mappings); + *cache = (Instant::now(), mappings); // Log results if validation_failures > 0 { @@ -2493,41 +2623,20 @@ impl MulticastGroupReconciler { } /// Get all multicast groups that need member reconciliation. - /// Returns both "Creating" and "Active" groups. + /// Returns "Creating", "Active", and "Deleting" groups. 
async fn get_reconcilable_groups( &self, opctx: &OpContext, ) -> Result, anyhow::Error> { - // For now, we still make two queries but this is where we'd add - // a single combined query method if/when the datastore supports it - let mut groups = self - .datastore - .multicast_groups_list_by_state( + self.datastore + .multicast_groups_list_by_states( opctx, - MulticastGroupState::Creating, + Self::RECONCILABLE_STATES, &DataPageParams::max_page(), ) .await - .context("failed to list 'Creating' multicast groups")?; - - let active_groups = self - .datastore - .multicast_groups_list_by_state( - opctx, - MulticastGroupState::Active, - &DataPageParams::max_page(), + .context( + "failed to list multicast groups for member reconciliation", ) - .await - .context("failed to list 'Active' multicast groups")?; - - groups.extend(active_groups); - - debug!( - opctx.log, - "found groups for member reconciliation"; - "total_groups" => groups.len() - ); - - Ok(groups) } } diff --git a/nexus/src/app/background/tasks/multicast/mod.rs b/nexus/src/app/background/tasks/multicast/mod.rs index a8a26cac7a7..8a30ac5bd96 100644 --- a/nexus/src/app/background/tasks/multicast/mod.rs +++ b/nexus/src/app/background/tasks/multicast/mod.rs @@ -46,11 +46,12 @@ //! - Subject to VPC routing and firewall policies //! //! **Underlay Groups** (admin-scoped IPv6): -//! - IPv6 multicast scope per RFC 7346; admin-local is ff04::/16 -//! +//! - Uses ff04::/64 prefix (subset of admin-local scope per [RFC 7346]) //! - Internal rack forwarding to guest instances //! - Mapped 1:1 with external groups via deterministic mapping //! +//! [RFC 7346]: https://www.rfc-editor.org/rfc/rfc7346 +//! //! ### Forwarding Architecture (Incoming multicast traffic to guests) //! //! 
Traffic flow for multicast into the rack and to guest instances: @@ -94,7 +95,7 @@ use std::collections::{BTreeMap, HashMap}; use std::net::{IpAddr, Ipv6Addr}; use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use anyhow::Result; use futures::FutureExt; @@ -109,7 +110,6 @@ use nexus_config::DEFAULT_UNDERLAY_MULTICAST_NET; use nexus_db_model::MulticastGroup; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; -use nexus_types::identity::Resource; use nexus_types::internal_api::background::MulticastGroupReconcilerStatus; use omicron_uuid_kinds::SledUuid; @@ -122,13 +122,13 @@ pub(crate) mod members; /// Type alias for the sled mapping cache. type SledMappingCache = - Arc>)>>; + Arc>)>>; /// Type alias for the backplane map cache. type BackplaneMapCache = Arc< RwLock< Option<( - SystemTime, + Instant, BTreeMap< dpd_client::types::PortId, dpd_client::types::BackplaneLink, @@ -209,7 +209,7 @@ impl MulticastGroupReconciler { sagas, underlay_admin_prefix, sled_mapping_cache: Arc::new(RwLock::new(( - SystemTime::now(), + Instant::now(), HashMap::new(), ))), sled_cache_ttl, @@ -224,22 +224,24 @@ impl MulticastGroupReconciler { /// Generate tag for multicast groups. /// - /// Both external and underlay groups use the same tag (the group name). - /// This pairs them logically for management and cleanup operations. + /// Delegates to [`MulticastGroup::dpd_tag()`] which uses the group's UUID + /// to ensure uniqueness across the group's entire lifecycle. pub(crate) fn generate_multicast_tag(group: &MulticastGroup) -> String { - group.name().to_string() + group.dpd_tag() } /// Generate admin-scoped IPv6 multicast address from an external multicast /// address. /// - /// Maps external addresses into the configured underlay admin-local prefix - /// (DEFAULT_UNDERLAY_MULTICAST_NET) using bitmask mapping. 
Preserves the - /// lower `128 - prefix_len` bits from the external address (the group ID) - /// and sets the high bits from the prefix. + /// Maps external addresses into our ff04::/64 underlay prefix + /// (DEFAULT_UNDERLAY_MULTICAST_NET, part of RFC 7346 admin-local scope) + /// using XOR-fold mapping. Preserves the lower `128 - prefix_len` bits + /// from the external address (the group ID) and sets the high bits from + /// the prefix. + /// + /// See [RFC 7346] for IPv6 multicast admin-local scope (ff04::/16). /// - /// Admin-local scope (ff04::/16) is defined in RFC 7346. - /// See: + /// [RFC 7346]: https://www.rfc-editor.org/rfc/rfc7346 pub(crate) fn map_external_to_underlay_ip( &self, external_ip: IpAddr, @@ -267,8 +269,8 @@ impl MulticastGroupReconciler { /// - Need to re-validate sled mappings against new topology pub(crate) async fn invalidate_sled_mapping_cache(&self) { let mut cache = self.sled_mapping_cache.write().await; - // Set timestamp to epoch to force refresh - *cache = (SystemTime::UNIX_EPOCH, cache.1.clone()); + // Set timestamp to past to force refresh on next check + *cache = (Instant::now() - self.sled_cache_ttl, cache.1.clone()); } } @@ -455,38 +457,51 @@ impl MulticastGroupReconciler { } } - // Process deleting groups - match self.reconcile_deleting_groups(opctx, &dataplane_client).await { - Ok(count) => status.groups_deleted += count, + // Process member state changes + match self.reconcile_member_states(opctx, &dataplane_client).await { + Ok(count) => status.members_processed += count, Err(e) => { - let msg = format!("failed to reconcile deleting groups: {e:#}"); + let msg = format!("failed to reconcile member states: {e:#}"); status.errors.push(msg); } } - // Reconcile active groups (verify state, update dataplane as needed) - match self.reconcile_active_groups(opctx, &dataplane_client).await { - Ok(count) => status.groups_verified += count, + // Clean up deleted members ("Left" + `time_deleted`) + // This must happen before 
`cleanup_empty_groups` so empty checks are accurate. + match self.cleanup_deleted_members(opctx).await { + Ok(count) => status.members_deleted += count, Err(e) => { - let msg = format!("failed to reconcile active groups: {e:#}"); + let msg = format!("failed to cleanup deleted members: {e:#}"); status.errors.push(msg); } } - // Process member state changes - match self.reconcile_member_states(opctx, &dataplane_client).await { - Ok(count) => status.members_processed += count, + // Implicitly delete empty groups (groups are automatically deleted when + // last member leaves) + // This handles the case where instance deletion causes members to be + // soft-deleted, and after cleanup, the group becomes empty. + match self.cleanup_empty_groups(opctx).await { + Ok(count) => status.empty_groups_marked += count, Err(e) => { - let msg = format!("failed to reconcile member states: {e:#}"); + let msg = format!("failed to cleanup empty groups: {e:#}"); status.errors.push(msg); } } - // Clean up deleted members ("Left" + `time_deleted`) - match self.cleanup_deleted_members(opctx).await { - Ok(count) => status.members_deleted += count, + // Process deleting groups (DPD cleanup + hard-delete from DB) + match self.reconcile_deleting_groups(opctx, &dataplane_client).await { + Ok(count) => status.groups_deleted += count, Err(e) => { - let msg = format!("failed to cleanup deleted members: {e:#}"); + let msg = format!("failed to reconcile deleting groups: {e:#}"); + status.errors.push(msg); + } + } + + // Reconcile active groups (verify state, update dataplane as needed) + match self.reconcile_active_groups(opctx, &dataplane_client).await { + Ok(count) => status.groups_verified += count, + Err(e) => { + let msg = format!("failed to reconcile active groups: {e:#}"); status.errors.push(msg); } } diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index a402ee2c1bd..764dace0c35 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -363,7 +363,7 @@ impl 
super::Nexus { &self, opctx: &OpContext, authz_instance: &authz::Instance, - multicast_groups: &[NameOrId], + multicast_groups: &[params::MulticastGroupIdentifier], ) -> Result<(), Error> { let instance_id = authz_instance.id(); @@ -390,7 +390,6 @@ impl super::Nexus { .multicast_group_members_list_by_instance( opctx, InstanceUuid::from_untyped_uuid(instance_id), - false, ) .await?; let current_group_ids: HashSet<_> = @@ -410,8 +409,9 @@ impl super::Nexus { let multicast_group_selector = params::MulticastGroupSelector { multicast_group: group_name_or_id.clone(), }; - let multicast_group_lookup = - self.multicast_group_lookup(opctx, &multicast_group_selector)?; + let multicast_group_lookup = self + .multicast_group_lookup(opctx, &multicast_group_selector) + .await?; let (.., db_group) = multicast_group_lookup.fetch_for(authz::Action::Read).await?; let id = db_group.id(); @@ -1500,7 +1500,6 @@ impl super::Nexus { .multicast_group_members_list_by_instance( opctx, InstanceUuid::from_untyped_uuid(authz_instance.id()), - false, // include_removed ) .await .map_err(|e| { diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index a550246aef7..e1da9b9742b 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -21,9 +21,13 @@ use nexus_db_queries::db; use nexus_db_queries::db::model::Name; use nexus_types::identity::Resource; use omicron_common::address::{ - IPV4_LINK_LOCAL_MULTICAST_SUBNET, IPV4_SSM_SUBNET, - IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET, IPV6_LINK_LOCAL_MULTICAST_SUBNET, - IPV6_SSM_SUBNET, + IPV4_ADMIN_SCOPED_MULTICAST_SUBNET, IPV4_GLOP_MULTICAST_SUBNET, + IPV4_LINK_LOCAL_MULTICAST_SUBNET, IPV4_SPECIFIC_RESERVED_MULTICAST_ADDRS, + IPV4_SSM_SUBNET, IPV6_INTERFACE_LOCAL_MULTICAST_LAST, + IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET, IPV6_LINK_LOCAL_MULTICAST_LAST, + IPV6_LINK_LOCAL_MULTICAST_SUBNET, IPV6_RESERVED_SCOPE_MULTICAST_LAST, + IPV6_RESERVED_SCOPE_MULTICAST_SUBNET, IPV6_SSM_SUBNET, Ipv4Range, + Ipv6Range, }; use 
omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -58,24 +62,80 @@ fn not_found_from_lookup(pool_lookup: &lookup::IpPool<'_>) -> Error { /// Validate multicast-specific constraints for IP ranges. /// -/// Enforces restrictions on multicast address ranges: -/// - IPv4: Rejects link-local (224.0.0.0/24), prevents ASM/SSM boundary spanning -/// - IPv6: Rejects interface-local (ff01::/16) and link-local (ff02::/16), -/// prevents ASM/SSM boundary spanning +/// Enforces restrictions on multicast address ranges to prevent allocation +/// of reserved or special-use addresses that would be rejected by Dendrite: +/// +/// - IPv4: Rejects link-local (224.0.0.0/24), GLOP (233.0.0.0/8), +/// admin-scoped (239.0.0.0/8), specific reserved addresses (NTP, Cisco +/// Auto-RP, PTP), and prevents ASM/SSM boundary spanning +/// - IPv6: Rejects reserved-scope (ff00::/16), interface-local (ff01::/16), +/// link-local (ff02::/16), and prevents ASM/SSM boundary spanning +/// +/// This validation ensures operators receive immediate feedback when +/// configuring IP pools, preventing users from encountering errors later +/// when allocating addresses for multicast groups. fn validate_multicast_range(range: &shared::IpRange) -> Result<(), Error> { + // These restrictions match the validation performed by Dendrite DPD + // management (see dendrite/dpd/src/mcast/validate.rs). 
match range { shared::IpRange::V4(v4_range) => { let first = v4_range.first_address(); let last = v4_range.last_address(); - // Reject IPv4 link-local multicast range (224.0.0.0/24) - if IPV4_LINK_LOCAL_MULTICAST_SUBNET.contains(first) - || IPV4_LINK_LOCAL_MULTICAST_SUBNET.contains(last) + // Reject IPv4 ranges that intersect reserved subnets { - return Err(Error::invalid_request( - "Cannot add IPv4 link-local multicast range \ - (224.0.0.0/24) to IP pool", - )); + // link-local (224.0.0.0/24) + let reserved = Ipv4Range { + first: IPV4_LINK_LOCAL_MULTICAST_SUBNET.addr(), + last: IPV4_LINK_LOCAL_MULTICAST_SUBNET + .broadcast() + .expect("valid IPv4 subnet"), + }; + if v4_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv4 link-local multicast range \ + (224.0.0.0/24) to IP pool", + )); + } + + // GLOP (233.0.0.0/8) + let reserved = Ipv4Range { + first: IPV4_GLOP_MULTICAST_SUBNET.addr(), + last: IPV4_GLOP_MULTICAST_SUBNET + .broadcast() + .expect("valid IPv4 subnet"), + }; + if v4_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv4 GLOP multicast range \ + (233.0.0.0/8) to IP pool", + )); + } + + // admin-scoped (239.0.0.0/8) + let reserved = Ipv4Range { + first: IPV4_ADMIN_SCOPED_MULTICAST_SUBNET.addr(), + last: IPV4_ADMIN_SCOPED_MULTICAST_SUBNET + .broadcast() + .expect("valid IPv4 subnet"), + }; + if v4_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv4 administratively scoped multicast range \ + (239.0.0.0/8) to IP pool", + )); + } + } + + // Reject ranges that contain specific reserved addresses + // (NTP, Cisco Auto-RP, PTP) - aligned with Dendrite validation + for &reserved_addr in &IPV4_SPECIFIC_RESERVED_MULTICAST_ADDRS { + if v4_range.contains(reserved_addr) { + return Err(Error::invalid_request(format!( + "Cannot add range containing specifically reserved \ + multicast address {reserved_addr} to IP pool" + ))); + } } // Validate range doesn't span ASM/SSM 
boundary @@ -92,24 +152,43 @@ fn validate_multicast_range(range: &shared::IpRange) -> Result<(), Error> { let first = v6_range.first_address(); let last = v6_range.last_address(); - // Reject interface-local (ff01::/16) and link-local (ff02::/16) - // IPv6 multicast ranges - if IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET.contains(first) - || IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET.contains(last) + // Reject IPv6 ranges that intersect reserved subnets { - return Err(Error::invalid_request( - "Cannot add IPv6 interface-local multicast range \ - (ff01::/16) to IP pool", - )); - } + // reserved-scope (ff00::/16) + let reserved = Ipv6Range { + first: IPV6_RESERVED_SCOPE_MULTICAST_SUBNET.addr(), + last: IPV6_RESERVED_SCOPE_MULTICAST_LAST, + }; + if v6_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv6 reserved-scope multicast range \ + (ff00::/16) to IP pool", + )); + } - if IPV6_LINK_LOCAL_MULTICAST_SUBNET.contains(first) - || IPV6_LINK_LOCAL_MULTICAST_SUBNET.contains(last) - { - return Err(Error::invalid_request( - "Cannot add IPv6 link-local multicast range \ - (ff02::/16) to IP pool", - )); + // interface-local (ff01::/16) + let reserved = Ipv6Range { + first: IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET.addr(), + last: IPV6_INTERFACE_LOCAL_MULTICAST_LAST, + }; + if v6_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv6 interface-local multicast range \ + (ff01::/16) to IP pool", + )); + } + + // link-local (ff02::/16) + let reserved = Ipv6Range { + first: IPV6_LINK_LOCAL_MULTICAST_SUBNET.addr(), + last: IPV6_LINK_LOCAL_MULTICAST_LAST, + }; + if v6_range.overlaps(&reserved) { + return Err(Error::invalid_request( + "Cannot add IPv6 link-local multicast range \ + (ff02::/16) to IP pool", + )); + } } // Validate range doesn't span ASM/SSM boundary diff --git a/nexus/src/app/multicast/dataplane.rs b/nexus/src/app/multicast/dataplane.rs index 9e4a2067321..047ab58b573 100644 --- a/nexus/src/app/multicast/dataplane.rs +++ 
b/nexus/src/app/multicast/dataplane.rs @@ -361,7 +361,7 @@ impl MulticastDataplaneClient { ); let dpd_clients = &self.dpd_clients; - let tag = external_group.name().to_string(); + let tag = external_group.dpd_tag(); // Convert MVLAN to u16 for DPD, validating through VlanID let vlan_id = external_group diff --git a/nexus/src/app/multicast/mod.rs b/nexus/src/app/multicast/mod.rs index 6cf27dc88db..009f3d99c7d 100644 --- a/nexus/src/app/multicast/mod.rs +++ b/nexus/src/app/multicast/mod.rs @@ -2,140 +2,134 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Multicast group management for network traffic distribution. +//! Multicast group management. //! -//! Group creation, member management, and IP pool integration following -//! the bifurcated design from [RFD 488](https://rfd.shared.oxide.computer/rfd/488). +//! See [RFD 488](https://rfd.shared.oxide.computer/rfd/488) for the bifurcated design. //! -//! ## Fleet-Scoped Authorization Model +//! # Scoping //! -//! Multicast groups are **fleet-scoped resources** (authz parent = "Fleet"), -//! similar to IP pools. This enables: +//! Multicast groups are fleet-scoped for visibility: any authenticated user can +//! list and read groups, and instances from different projects or silos can +//! join the same group. //! -//! - **Cross-project multicast**: Instances from different projects can join -//! the same group without IP waste -//! - **Cross-silo multicast**: Instances from different silos can join the -//! same group (when pools are linked to multiple silos) +//! # Access control //! -//! ### Authorization Rules +//! Fleet administrators control multicast access by creating multicast IP pools +//! and linking them to silos. A silo can only use multicast pools that are +//! linked to it. Cross-silo multicast is enabled by linking the same pool to +//! multiple silos. This is the same model used for unicast IP pools. //! -//! 
- **Creating/modifying/deleting groups**: Any authenticated user in the fleet (silo users) -//! can create, modify, and delete multicast groups -//! - **Reading/listing groups**: Any authenticated user in the fleet can read and list groups -//! (enables discovery of available groups for joining instances) -//! - **Listing group members**: Only requires Read permission on the group (fleet-scoped), -//! not permissions on individual member instances -//! - **Adding/removing members**: Requires Read on group + Modify on the specific instance -//! (project collaborators can attach only their own instances to any fleet-scoped group) +//! # Lifecycle //! -//! ### VNI Assignment +//! Groups are created implicitly when the first member joins (via member-add) +//! and deleted when the last member leaves. The group's IP is allocated from +//! the multicast pool on creation and returned on deletion. //! -//! All fleet-scoped multicast groups use `DEFAULT_MULTICAST_VNI` (77), which is -//! reserved for fleet-wide multicast traffic and below the `MIN_GUEST_VNI` (1024) -//! threshold. This ensures consistent behavior across all multicast groups. +//! Groups use their UUID as the dpd tag for switch configuration. This avoids +//! races when group names are reused after deletion. +//! +//! # Authorization +//! +//! - list/read groups: any authenticated user +//! - add/remove members: requires read on the group and modify on the instance +//! +//! # VNI +//! +//! All multicast groups use `DEFAULT_MULTICAST_VNI` (77), which is reserved for +//! multicast and below the guest VNI range. 
+use std::collections::HashSet; use std::net::IpAddr; use std::sync::Arc; +use ipnetwork::IpNetwork; use ref_cast::RefCast; -use nexus_config::DEFAULT_UNDERLAY_MULTICAST_NET; use nexus_db_lookup::{LookupPath, lookup}; use nexus_db_model::Name; use nexus_db_queries::context::OpContext; use nexus_db_queries::{authz, db}; use nexus_types::external_api::{params, views}; +use nexus_types::identity::Resource; +use nexus_types::multicast::MulticastGroupCreate; use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_SUBNET}; use omicron_common::api::external::{ - self, CreateResult, DataPageParams, DeleteResult, Error, ListResultVec, - LookupResult, NameOrId, UpdateResult, http_pagination::PaginatedBy, + self, CreateResult, DataPageParams, DeleteResult, + IdentityMetadataCreateParams, ListResultVec, LookupResult, + http_pagination::PaginatedBy, }; use omicron_uuid_kinds::{GenericUuid, InstanceUuid, MulticastGroupUuid}; pub(crate) mod dataplane; impl super::Nexus { - /// Look up a fleet-scoped multicast group by name or ID. - pub(crate) fn multicast_group_lookup<'a>( + /// Look up a fleet-scoped multicast group by name, ID, or IP address. + /// + /// Returns a lookup builder for authorization and fetching. For IP lookups, + /// the group is fetched first to resolve the ID, then a builder is created. 
+ pub(crate) async fn multicast_group_lookup<'a>( &'a self, opctx: &'a OpContext, multicast_group_selector: &'a params::MulticastGroupSelector, ) -> LookupResult> { // Multicast groups are fleet-scoped (like IP pools) match &multicast_group_selector.multicast_group { - NameOrId::Id(id) => { + params::MulticastGroupIdentifier::Id(id) => { let multicast_group = LookupPath::new(opctx, &self.db_datastore) .multicast_group_id(*id); Ok(multicast_group) } - NameOrId::Name(name) => { + params::MulticastGroupIdentifier::Name(name) => { let multicast_group = LookupPath::new(opctx, &self.db_datastore) .multicast_group_name(Name::ref_cast(name)); Ok(multicast_group) } + params::MulticastGroupIdentifier::Ip(ip) => { + // IP lookup requires fetching first to resolve the ID + let group = self + .db_datastore + .multicast_group_lookup_by_ip(opctx, *ip) + .await?; + let multicast_group = + LookupPath::new(opctx, &self.db_datastore) + .multicast_group_id(group.identity.id); + Ok(multicast_group) + } } } - /// Create a multicast group. + /// Create a multicast group (called during implicit creation from join). + /// + /// Access control is enforced by pool linking: the IP is allocated from a + /// multicast pool linked to the caller's silo. pub(crate) async fn multicast_group_create( &self, opctx: &OpContext, - params: ¶ms::MulticastGroupCreate, + params: &MulticastGroupCreate, ) -> CreateResult { - // Authorization FIRST: check before validating parameters - // This ensures 403 Forbidden is returned before 400 Bad Request - opctx - .authorize(authz::Action::CreateChild, &authz::MULTICAST_GROUP_LIST) - .await?; - - // If an explicit multicast IP is provided, validate ASM/SSM semantics - // and ensure it does not collide with the fixed underlay prefix. - // - ASM IPs must not specify sources - // - SSM IPs must specify at least one source + // If an explicit multicast IP is provided, validate ASM/SSM semantics. 
+ // + // Reserved ranges (ff00-ff02::/16) are validated at IP pool creation. + // ff04::/16 addresses are allowed in pools, but the reconciler XOR-folds + // them during underlay mapping to prevent collision with the fixed + // underlay prefix (ff04::/64). See `map_external_to_underlay_ip_impl`. + // + // - ASM IPs should not specify sources + // - SSM IPs require at least one source if let Some(mcast_ip) = params.multicast_ip { let empty: Vec = Vec::new(); let sources: &[IpAddr] = params.source_ips.as_deref().unwrap_or(&empty); validate_ssm_configuration(mcast_ip, sources)?; - - // Block external IPv6 multicast addresses that fall within the - // fixed underlay admin-local prefix (reserved for underlay). - if let IpAddr::V6(ipv6) = mcast_ip { - // Convert fixed underlay prefix to ipnet and compare - let fixed_underlay: ipnet::Ipv6Net = - DEFAULT_UNDERLAY_MULTICAST_NET - .to_string() - .parse() - .expect("valid fixed underlay admin prefix"); - if fixed_underlay.contains(&ipv6) { - return Err(Error::invalid_request(&format!( - "IPv6 address {ipv6} is within the reserved underlay multicast prefix {}", - fixed_underlay - ))); - } - } } - let authz_pool = match ¶ms.pool { - Some(pool_selector) => { - let authz_pool = self - .ip_pool_lookup(opctx, &pool_selector)? - .lookup_for(authz::Action::CreateChild) - .await? - .0; - - // Validate that the pool is of type Multicast - Some( - self.db_datastore - .resolve_pool_for_allocation( - opctx, - Some(authz_pool), - nexus_db_model::IpPoolType::Multicast, - ) - .await?, - ) + // If multicast_ip is provided, discover the pool containing that IP. + // Otherwise, pool resolution happens in the datastore layer. + let authz_pool = match params.multicast_ip { + Some(ip) => { + Some(self.resolve_pool_for_multicast_ip(opctx, ip).await?) } None => None, }; @@ -151,29 +145,76 @@ impl super::Nexus { Ok(group) } - /// Fetch a multicast group. - pub(crate) async fn multicast_group_fetch( + /// View a multicast group by selector. 
+ /// + /// For IP lookups, this avoids a double-fetch by fetching once to get + /// the group, building the authz object, and authorizing. For Name/ID + /// lookups, this uses the standard lookup + fetch path. + pub(crate) async fn multicast_group_view( &self, opctx: &OpContext, - group_lookup: &lookup::MulticastGroup<'_>, + selector: ¶ms::MulticastGroupSelector, ) -> LookupResult { - let (.., group_id) = - group_lookup.lookup_for(authz::Action::Read).await?; - self.db_datastore - .multicast_group_fetch( - opctx, - MulticastGroupUuid::from_untyped_uuid(group_id.id()), - ) - .await + match &selector.multicast_group { + params::MulticastGroupIdentifier::Ip(ip) => { + // IP lookup - fetch once and authorize + let group = self + .db_datastore + .multicast_group_lookup_by_ip(opctx, *ip) + .await?; + let authz_group = authz::MulticastGroup::new( + authz::FLEET, + group.identity.id, + external::LookupType::ById(group.identity.id), + ); + opctx.authorize(authz::Action::Read, &authz_group).await?; + Ok(group) + } + _ => { + // Name/ID lookup - use lookup builder + fetch + let group_lookup = + self.multicast_group_lookup(opctx, selector).await?; + let (.., authz_group) = + group_lookup.lookup_for(authz::Action::Read).await?; + self.db_datastore + .multicast_group_fetch( + opctx, + MulticastGroupUuid::from_untyped_uuid(authz_group.id()), + ) + .await + } + } } - /// Look up multicast group by IP address. - pub(crate) async fn multicast_group_lookup_by_ip( + /// Resolve which multicast pool contains a given IP address. + /// + /// Used for join-by-IP functionality where the user specifies a multicast + /// IP address directly. The system auto-discovers which pool contains the + /// IP (pool ranges are globally unique, so lookup is unambiguous) and + /// returns the authz pool for group creation. + /// + /// Note: only multicast pools linked to the caller's silo are + /// considered. Pool linking controls access to multicast addresses. 
+ pub(crate) async fn resolve_pool_for_multicast_ip( &self, opctx: &OpContext, - ip_addr: std::net::IpAddr, - ) -> LookupResult { - self.db_datastore.multicast_group_lookup_by_ip(opctx, ip_addr).await + ip: IpAddr, + ) -> Result { + let pool = self + .db_datastore + .ip_pool_containing_multicast_ip(opctx, ip) + .await? + .ok_or_else(|| { + external::Error::invalid_request( + "multicast IP not in any pool's address range", + ) + })?; + + Ok(authz::IpPool::new( + authz::FLEET, + pool.id(), + external::LookupType::ById(pool.id()), + )) } /// List all multicast groups. @@ -191,133 +232,266 @@ impl super::Nexus { self.db_datastore.multicast_groups_list(opctx, pagparams).await } - /// Update a multicast group. - pub(crate) async fn multicast_group_update( - &self, + /// Join an instance to a multicast group by identifier (IP, name, or ID). + /// + /// # Authorization + /// + /// Requires `Modify` on the instance. Groups are fleet-scoped resources + /// readable by any authenticated user; authorization is enforced on the + /// instance being attached. 
+ /// + /// # Behavior + /// + /// - **IP/name joins**: Creates the group implicitly if it doesn't exist + /// - **ID joins**: The group must already exist (returns error otherwise) + /// - **SSM validation**: If `source_ips` provided, validates SSM configuration + pub(crate) async fn instance_join_multicast_group( + self: &Arc, opctx: &OpContext, - group_lookup: &lookup::MulticastGroup<'_>, - params: ¶ms::MulticastGroupUpdate, - ) -> UpdateResult { - let (.., group_id) = - group_lookup.lookup_for(authz::Action::Modify).await?; - - // Get the current group to check state and get underlay group ID - let current_group = self - .db_datastore - .multicast_group_fetch( - opctx, - MulticastGroupUuid::from_untyped_uuid(group_id.id()), - ) - .await?; - - // Ensure group is in "Active" state (should have `underlay_group_id`) - if current_group.state != db::model::MulticastGroupState::Active { - return Err(Error::invalid_request(&format!( - "cannot update multicast group in state: {state}. group must be in \"Active\" state.", - state = current_group.state - ))); + group_identifier: ¶ms::MulticastGroupIdentifier, + instance_lookup: &lookup::Instance<'_>, + source_ips: &Option>, + ) -> CreateResult { + // Check if multicast is enabled + if !self.multicast_enabled() { + return Err(external::Error::invalid_request( + "multicast functionality is currently disabled", + )); } - // Ensure the group has an associated underlay group (required for updates) - current_group.underlay_group_id.ok_or_else(|| { - Error::internal_error( - "active multicast group missing `underlay_group_id`", - ) - })?; + // Authorize instance modification upfront + let (.., authz_instance) = + instance_lookup.lookup_for(authz::Action::Modify).await?; - // Validate the new source configuration if provided - if let Some(ref new_source_ips) = params.source_ips { - validate_ssm_configuration( - current_group.multicast_ip.ip(), - new_source_ips, - )?; - } + // Find or create the group based on identifier type + 
let group_id = match group_identifier { + params::MulticastGroupIdentifier::Ip(ip) => { + self.join_resolve_by_ip(opctx, *ip, source_ips).await? + } + params::MulticastGroupIdentifier::Name(name) => { + self.join_resolve_by_name( + opctx, + name.clone().into(), + source_ips, + ) + .await? + } + params::MulticastGroupIdentifier::Id(id) => { + self.join_resolve_by_id(opctx, *id, source_ips).await? + } + }; - // Update the database - let result = self + // Attach the member + let member = self .db_datastore - .multicast_group_update( + .multicast_group_member_add( opctx, - MulticastGroupUuid::from_untyped_uuid(group_id.id()), - params, + group_id, + InstanceUuid::from_untyped_uuid(authz_instance.id()), ) .await?; - // Activate RPW to apply changes to DPD (eventually consistent) - // The reconciler will detect drift and launch the UPDATE saga + // Activate reconciler to process the new member self.background_tasks.task_multicast_reconciler.activate(); - - Ok(result) + Ok(member) } - /// Tag a multicast group for deletion. - pub(crate) async fn multicast_group_delete( + /// Resolve group by IP: find existing or create new. + async fn join_resolve_by_ip( &self, opctx: &OpContext, - group_lookup: &lookup::MulticastGroup<'_>, - ) -> DeleteResult { - let (.., group_id) = - group_lookup.lookup_for(authz::Action::Delete).await?; + ip: IpAddr, + source_ips: &Option>, + ) -> Result { + // Try to find existing group by IP + match self.db_datastore.multicast_group_lookup_by_ip(opctx, ip).await { + Ok(existing) => { + // Authorize Read for audit trail symmetry with name/ID paths + let authz_group = authz::MulticastGroup::new( + authz::FLEET, + existing.identity.id, + external::LookupType::ById(existing.identity.id), + ); + opctx.authorize(authz::Action::Read, &authz_group).await?; + validate_sources_match(source_ips, &existing.source_ips)?; + return Ok(MulticastGroupUuid::from_untyped_uuid( + existing.identity.id, + )); + } + Err(external::Error::ObjectNotFound { .. 
}) => { + // Fall through to create + } + Err(e) => return Err(e), + } - // Mark for deletion via RPW: sets state="Deleting" (not soft-delete). - // RPW cleanup ensures DPD configuration is removed before final deletion. - self.db_datastore - .mark_multicast_group_for_removal( - opctx, - MulticastGroupUuid::from_untyped_uuid(group_id.id()), - ) - .await?; + // SSM addresses require at least one source IP + if is_ssm_address(ip) && source_ips.is_none() { + return Err(external::Error::invalid_request( + "SSM multicast addresses require at least one source IP", + )); + } - // Activate reconciler to process the "Deleting" state - self.background_tasks.task_multicast_reconciler.activate(); + // Source IPs must match the multicast group's address family + validate_source_address_family(ip, source_ips)?; + + let create_params = MulticastGroupCreate { + identity: IdentityMetadataCreateParams { + name: generate_group_name_from_ip(ip)?, + description: format!( + "Implicitly created multicast group for {ip}" + ), + }, + multicast_ip: Some(ip), + source_ips: source_ips.clone(), + mvlan: None, + }; - Ok(()) + // Create the group; on conflict -> re-lookup + match self.multicast_group_create(opctx, &create_params).await { + Ok(created) => { + Ok(MulticastGroupUuid::from_untyped_uuid(created.identity.id)) + } + Err(external::Error::ObjectAlreadyExists { .. }) => { + // Another request created it first, validate sources match + let group = self + .db_datastore + .multicast_group_lookup_by_ip(opctx, ip) + .await?; + // Authorize Read for audit trail symmetry + let authz_group = authz::MulticastGroup::new( + authz::FLEET, + group.identity.id, + external::LookupType::ById(group.identity.id), + ); + opctx.authorize(authz::Action::Read, &authz_group).await?; + validate_sources_match(source_ips, &group.source_ips)?; + Ok(MulticastGroupUuid::from_untyped_uuid(group.identity.id)) + } + Err(e) => Err(e), + } } - /// Add an instance to a multicast group. 
- pub(crate) async fn multicast_group_member_attach( - self: &Arc, + /// Resolve group by name, either find existing or create a new group. + async fn join_resolve_by_name( + &self, opctx: &OpContext, - group_lookup: &lookup::MulticastGroup<'_>, - instance_lookup: &lookup::Instance<'_>, - ) -> CreateResult { - // Multicast groups are fleet-scoped - users only need Read permission on the group - // and Modify permission on the instance to attach it + name: Name, + source_ips: &Option>, + ) -> Result { + let selector = params::MulticastGroupSelector { + multicast_group: params::MulticastGroupIdentifier::Name( + name.clone().into(), + ), + }; + let group_lookup = + self.multicast_group_lookup(opctx, &selector).await?; + + // Check if group exists (`lookup_for` does authz + returns ID) + match group_lookup.lookup_for(authz::Action::Read).await { + Ok((.., authz_group)) => { + let group_id = + MulticastGroupUuid::from_untyped_uuid(authz_group.id()); + let group = self + .db_datastore + .multicast_group_fetch(opctx, group_id) + .await?; + validate_sources_match(source_ips, &group.source_ips)?; + return Ok(group_id); + } + Err(external::Error::ObjectNotFound { .. }) => { + // Fall through to create + } + Err(e) => return Err(e), + } + + let create_params = MulticastGroupCreate { + identity: IdentityMetadataCreateParams { + name: name.into(), + description: "Implicitly created for instance attachment" + .to_string(), + }, + multicast_ip: None, + source_ips: source_ips.clone(), + mvlan: None, + }; + + // Create the group; on conflict -> re-lookup + match self.multicast_group_create(opctx, &create_params).await { + Ok(created) => { + Ok(MulticastGroupUuid::from_untyped_uuid(created.identity.id)) + } + Err(external::Error::ObjectAlreadyExists { .. 
}) => { + // Another request created it first, re-lookup and validate + let (.., authz_group) = + group_lookup.lookup_for(authz::Action::Read).await?; + let group_id = + MulticastGroupUuid::from_untyped_uuid(authz_group.id()); + let group = self + .db_datastore + .multicast_group_fetch(opctx, group_id) + .await?; + validate_sources_match(source_ips, &group.source_ips)?; + Ok(group_id) + } + Err(e) => Err(e), + } + } + + /// Resolve group by ID: must exist, no implicit creation. + async fn join_resolve_by_id( + &self, + opctx: &OpContext, + id: uuid::Uuid, + source_ips: &Option>, + ) -> Result { + let selector = params::MulticastGroupSelector { + multicast_group: params::MulticastGroupIdentifier::Id(id), + }; + let group_lookup = + self.multicast_group_lookup(opctx, &selector).await?; + + // Authorize and fetch - group must exist let (.., authz_group) = group_lookup.lookup_for(authz::Action::Read).await?; - let (.., authz_instance) = - instance_lookup.lookup_for(authz::Action::Modify).await?; + let group_id = MulticastGroupUuid::from_untyped_uuid(authz_group.id()); + let group = + self.db_datastore.multicast_group_fetch(opctx, group_id).await?; + validate_sources_match(source_ips, &group.source_ips)?; - let member = self - .db_datastore - .multicast_group_member_add( - opctx, - MulticastGroupUuid::from_untyped_uuid(authz_group.id()), - InstanceUuid::from_untyped_uuid(authz_instance.id()), - ) - .await?; - - // Activate reconciler to process the new member ("Joining" → "Joined") - self.background_tasks.task_multicast_reconciler.activate(); - Ok(member) + Ok(group_id) } /// Remove an instance from a multicast group. - pub(crate) async fn multicast_group_member_detach( + /// + /// # Authorization + /// + /// Requires `Read` on the group and `Modify` on the instance. 
+ /// + /// # Behavior + /// + /// - **Idempotent**: Returns success if the member doesn't exist + /// - **Implicit deletion**: If this was the last member, marks the group + /// for deletion (reconciler completes cleanup) + pub(crate) async fn instance_leave_multicast_group( self: &Arc, opctx: &OpContext, group_lookup: &lookup::MulticastGroup<'_>, instance_lookup: &lookup::Instance<'_>, ) -> DeleteResult { - // Multicast groups are fleet-scoped - users only need Read permission on the group - // and Modify permission on the instance to detach it - let (.., authz_group) = - group_lookup.lookup_for(authz::Action::Read).await?; + // Check if multicast is enabled - if not, skip member removal + if !self.multicast_enabled() { + return Err(external::Error::invalid_request( + "multicast functionality is currently disabled", + )); + } + + // Authorize: Modify on instance (checked first), Read on group let (.., authz_instance) = instance_lookup.lookup_for(authz::Action::Modify).await?; + let (.., authz_group) = + group_lookup.lookup_for(authz::Action::Read).await?; - // First, get the member ID by group and instance - // For idempotency, if the member doesn't exist, we consider the removal successful + // Idempotent: if member doesn't exist, return success let member = match self .db_datastore .multicast_group_member_get_by_group_and_instance( @@ -329,7 +503,6 @@ impl super::Nexus { { Some(member) => member, None => { - // Member doesn't exist - removal is idempotent, return success return Ok(()); } }; @@ -338,25 +511,34 @@ impl super::Nexus { .multicast_group_member_delete_by_id(opctx, member.id) .await?; - // Activate reconciler to process the member removal + // Atomically mark group for deletion if this was the last member. + // The NOT EXISTS guard in the datastore method prevents race conditions + // where a concurrent join could slip in between a "list members" check + // and the mark-for-removal call. 
+ let _ = self + .db_datastore + .mark_multicast_group_for_removal_if_no_members( + opctx, + MulticastGroupUuid::from_untyped_uuid(authz_group.id()), + ) + .await?; + + // Activate reconciler to process the member removal (and group deletion if triggered) self.background_tasks.task_multicast_reconciler.activate(); Ok(()) } /// List members of a multicast group. /// - /// ## Authorization + /// # Authorization /// - /// This operation only requires "Read" permission on the multicast group - /// itself (fleet-scoped). It does NOT check permissions on the individual - /// instances that are members of the group. + /// Requires `Read` on the multicast group (fleet-scoped). Does not check + /// permissions on individual member instances. /// /// This asymmetry is intentional: - /// - **Listing members**: Allows discovery of which instances are in a group - /// (useful for understanding multicast group membership across projects) - /// - **Adding/removing members**: Requires Modify permission on the specific - /// instance (project-scoped), enforcing that users can only manage instances - /// they own + /// - **Listing members**: Allows discovery of group membership across projects + /// - **Adding/removing members**: Requires `Modify` on the specific instance + /// (project-scoped), so users can only manage their own instances /// /// Note: When unauthorized users attempt to add/remove instances they don't /// have access to, the instance lookup fails with 404 (not 403) to prevent @@ -368,7 +550,7 @@ impl super::Nexus { pagparams: &DataPageParams<'_, uuid::Uuid>, ) -> ListResultVec { let (.., group_id) = - group_lookup.lookup_for(authz::Action::Read).await?; + group_lookup.lookup_for(authz::Action::ListChildren).await?; self.db_datastore .multicast_group_members_list( opctx, @@ -396,7 +578,6 @@ impl super::Nexus { .multicast_group_members_list_by_instance( opctx, InstanceUuid::from_untyped_uuid(authz_instance.id()), - false, ) .await?; members @@ -406,12 +587,9 @@ impl 
super::Nexus { } } -/// Validate Source-Specific Multicast (SSM) configuration per RFC 4607: -/// +/// Validate SSM configuration per [RFC 4607]: IPv4 232/8 or IPv6 ff30::/12. /// -/// This function validates that: -/// 1. For IPv4 SSM: multicast address is in 232/8 range -/// 2. For IPv6 SSM: multicast address is in FF30::/12 range (covers all FF3x::/32 SSM scopes) +/// [RFC 4607]: https://www.rfc-editor.org/rfc/rfc4607 fn validate_ssm_configuration( multicast_ip: IpAddr, source_ips: &[IpAddr], @@ -435,6 +613,98 @@ fn validate_ssm_configuration( } } +// Private helpers for join logic + +/// Check if an IP is in the SSM range. +fn is_ssm_address(ip: IpAddr) -> bool { + match ip { + IpAddr::V4(addr) => IPV4_SSM_SUBNET.contains(addr), + IpAddr::V6(addr) => IPV6_SSM_SUBNET.contains(addr), + } +} + +/// Validate that source IPs match the multicast group's address family. +fn validate_source_address_family( + multicast_ip: IpAddr, + source_ips: &Option>, +) -> Result<(), external::Error> { + let Some(sources) = source_ips else { + return Ok(()); + }; + + let is_v4_group = multicast_ip.is_ipv4(); + for source in sources { + if source.is_ipv4() != is_v4_group { + return Err(external::Error::invalid_request(&format!( + "source IP {source} does not match multicast group address family ({})", + if is_v4_group { "IPv4" } else { "IPv6" } + ))); + } + } + Ok(()) +} + +/// Generate a group name from an IP address (e.g., "mcast-224-1-2-3"). 
+fn generate_group_name_from_ip( + ip: IpAddr, +) -> Result { + let name_str = match ip { + IpAddr::V4(v4) => { + let [a, b, c, d] = v4.octets(); + format!("mcast-{a}-{b}-{c}-{d}") + } + IpAddr::V6(v6) => { + // Use segments for consistent formatting (avoids :: compression issues) + let segs = v6.segments(); + format!( + "mcast-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}", + segs[0], + segs[1], + segs[2], + segs[3], + segs[4], + segs[5], + segs[6], + segs[7] + ) + } + }; + name_str.parse().map_err(|_| { + external::Error::internal_error(&format!( + "IP should be valid as group name: {ip}" + )) + }) +} + +/// Validate that requested sources match existing group sources. +/// +/// If `requested` is `None`, the join inherits the group's existing sources +/// (implicit acceptance). If `requested` is `Some`, the sources must exactly +/// match the group's existing sources - partial overlap is not allowed. +fn validate_sources_match( + requested: &Option>, + existing: &[IpNetwork], +) -> Result<(), external::Error> { + // None means "inherit existing sources" - always valid + let Some(req_sources) = requested else { + return Ok(()); + }; + + let requested_set: HashSet = + req_sources.iter().copied().map(IpNetwork::from).collect(); + let existing_set: HashSet<&IpNetwork> = existing.iter().collect(); + + if requested_set.len() != existing_set.len() + || !requested_set.iter().all(|ip| existing_set.contains(ip)) + { + return Err(external::Error::invalid_request( + "multicast group already exists with different source IPs", + )); + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 9acfb435e36..a744c5fa72e 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1081,6 +1081,7 @@ async fn sic_join_instance_multicast_group( let multicast_group_lookup = osagactx .nexus() .multicast_group_lookup(&opctx, &multicast_group_selector) + 
.await .map_err(ActionError::action_failed)?; // Multicast groups are fleet-scoped - users only need Read permission on the group @@ -1157,7 +1158,8 @@ async fn sic_join_instance_multicast_group_undo( }; let multicast_group_lookup = osagactx .nexus() - .multicast_group_lookup(&opctx, &multicast_group_selector)?; + .multicast_group_lookup(&opctx, &multicast_group_selector) + .await?; // Undo uses same permission as forward action (Read on multicast group) let (.., db_group) = multicast_group_lookup.fetch_for(authz::Action::Read).await?; diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 0edc640cdc5..143c9939913 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -168,6 +168,14 @@ async fn sid_leave_multicast_groups( .await .map_err(ActionError::action_failed)?; + // Activate the multicast reconciler to process the member cleanup chain: + // cleanup deleted members → find empty groups → mark them "Deleting" → + // process "Deleting" groups (DPD cleanup) → hard-delete from DB + let nexus = osagactx.nexus(); + nexus + .background_tasks + .activate(&nexus.background_tasks.task_multicast_reconciler); + info!( osagactx.log(), "Marked multicast members for removal"; diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 642c7a3947f..591f205a9c3 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -37,7 +37,6 @@ pub mod instance_migrate; pub mod instance_start; pub mod instance_update; pub mod multicast_group_dpd_ensure; -pub mod multicast_group_dpd_update; pub mod project_create; pub mod region_replacement_drive; pub mod region_replacement_finish; @@ -187,8 +186,7 @@ fn make_action_registry() -> ActionRegistry { region_snapshot_replacement_step_garbage_collect::SagaRegionSnapshotReplacementStepGarbageCollect, region_snapshot_replacement_finish::SagaRegionSnapshotReplacementFinish, image_create::SagaImageCreate, - 
multicast_group_dpd_ensure::SagaMulticastGroupDpdEnsure, - multicast_group_dpd_update::SagaMulticastGroupDpdUpdate + multicast_group_dpd_ensure::SagaMulticastGroupDpdEnsure ]; #[cfg(test)] diff --git a/nexus/src/app/sagas/multicast_group_dpd_ensure.rs b/nexus/src/app/sagas/multicast_group_dpd_ensure.rs index 365d1615c6e..20f82973a01 100644 --- a/nexus/src/app/sagas/multicast_group_dpd_ensure.rs +++ b/nexus/src/app/sagas/multicast_group_dpd_ensure.rs @@ -225,7 +225,7 @@ async fn mgde_rollback_dataplane( let (external_group, _) = sagactx .lookup::<(MulticastGroup, UnderlayMulticastGroup)>("group_data")?; - let multicast_tag = external_group.name().to_string(); + let multicast_tag = external_group.dpd_tag(); // Use MulticastDataplaneClient for consistent cleanup let dataplane = MulticastDataplaneClient::new( @@ -281,10 +281,9 @@ async fn mgde_update_group_state( // Transition the group from "Creating" -> "Active" osagactx .datastore() - .multicast_group_set_state( + .multicast_group_set_active( &opctx, MulticastGroupUuid::from_untyped_uuid(params.external_group_id), - nexus_db_model::MulticastGroupState::Active, ) .await .map_err(ActionError::action_failed)?; @@ -312,14 +311,11 @@ mod test { create_default_ip_pool, link_ip_pool, object_create, }; use nexus_test_utils_macros::nexus_test; - use nexus_types::external_api::params::{ - IpPoolCreate, MulticastGroupCreate, - }; + use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::shared::{IpRange, Ipv4Range}; use nexus_types::external_api::views::{IpPool, IpPoolRange, IpVersion}; - use omicron_common::api::external::{ - IdentityMetadataCreateParams, NameOrId, - }; + use nexus_types::multicast::MulticastGroupCreate; + use omicron_common::api::external::IdentityMetadataCreateParams; use crate::app::saga::create_saga_dag; use crate::app::sagas::test_helpers; @@ -464,7 +460,14 @@ mod test { // Link pool to silo link_ip_pool(client, pool_name, &DEFAULT_SILO.id(), false).await; - // Create 
multicast group via API (starts in Creating state) + // Create multicast group directly via datastore. + let (authz_pool, _) = nexus + .ip_pool_lookup(&opctx, &pool_name.parse().unwrap()) + .expect("Pool lookup should succeed") + .fetch() + .await + .expect("Pool should exist"); + let group_params = MulticastGroupCreate { identity: IdentityMetadataCreateParams { name: "saga-reject-test".parse().unwrap(), @@ -472,22 +475,17 @@ mod test { }, multicast_ip: Some(IpAddr::V4(Ipv4Addr::new(224, 70, 0, 100))), source_ips: None, - pool: Some(NameOrId::Name("saga-state-pool".parse().unwrap())), mvlan: None, }; - let group: nexus_types::external_api::views::MulticastGroup = - object_create(client, "/v1/multicast-groups", &group_params).await; - - // Fetch the external group from database to get full model + let external_group = datastore + .multicast_group_create(&opctx, &group_params, Some(authz_pool)) + .await + .expect("Multicast group should be created"); let group_id = omicron_uuid_kinds::MulticastGroupUuid::from_untyped_uuid( - group.identity.id, + external_group.id(), ); - let external_group = datastore - .multicast_group_fetch(&opctx, group_id) - .await - .expect("Failed to fetch external group"); // Manually create underlay group (normally done by reconciler) let underlay_group = datastore @@ -497,22 +495,18 @@ mod test { "ff04::1:2:3:4".parse().unwrap(), ) .await - .expect("Failed to create underlay group"); + .expect("Underlay group should be created"); // Manually transition the group to "Active" state in the database datastore - .multicast_group_set_state( - &opctx, - MulticastGroupUuid::from_untyped_uuid(group.identity.id), - nexus_db_model::MulticastGroupState::Active, - ) + .multicast_group_set_active(&opctx, group_id) .await - .expect("Failed to set group to Active state"); + .expect("Group should transition to Active state"); // Try to run saga on Active group - should fail let params = Params { serialized_authn: Serialized::for_opctx(&opctx), - 
external_group_id: group.identity.id, + external_group_id: external_group.id(), underlay_group_id: underlay_group.id, }; @@ -523,12 +517,5 @@ mod test { // Saga should reject Active group assert!(result.is_err(), "Saga should reject group in Active state"); - - // Cleanup - nexus_test_utils::resource_helpers::object_delete( - client, - &format!("/v1/multicast-groups/{}", group.identity.name), - ) - .await; } } diff --git a/nexus/src/app/sagas/multicast_group_dpd_update.rs b/nexus/src/app/sagas/multicast_group_dpd_update.rs deleted file mode 100644 index 33d9717b2e3..00000000000 --- a/nexus/src/app/sagas/multicast_group_dpd_update.rs +++ /dev/null @@ -1,271 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Saga for updating multicast group state in dataplane via DPD. -//! -//! Handles atomic updates of external and underlay multicast groups in DPD. -//! Reads current state from database and applies to all switches. -//! -//! Idempotent saga can be called multiple times safely. If group state hasn't -//! changed, DPD-update is effectively a no-op. 
- -use anyhow::Context; -use serde::{Deserialize, Serialize}; -use slog::{debug, info}; -use steno::{ActionError, DagBuilder, Node}; -use uuid::Uuid; - -use dpd_client::types::{ - MulticastGroupExternalResponse, MulticastGroupUnderlayResponse, -}; - -use nexus_db_model::{MulticastGroup, UnderlayMulticastGroup}; -use nexus_db_queries::authn; -use nexus_types::identity::Resource; -use omicron_uuid_kinds::{GenericUuid, MulticastGroupUuid}; - -use super::{ActionRegistry, NexusActionContext, NexusSaga, SagaInitError}; -use crate::app::multicast::dataplane::{ - GroupUpdateParams, MulticastDataplaneClient, -}; -use crate::app::sagas::declare_saga_actions; - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub(crate) struct Params { - /// Authentication context - pub serialized_authn: authn::saga::Serialized, - /// External multicast group to update - pub external_group_id: Uuid, - /// Underlay multicast group to update - pub underlay_group_id: Uuid, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct DataplaneUpdateResponse { - underlay: MulticastGroupUnderlayResponse, - external: MulticastGroupExternalResponse, -} - -declare_saga_actions! 
{ - multicast_group_dpd_update; - - FETCH_GROUP_DATA -> "group_data" { - + mgu_fetch_group_data - } - UPDATE_DATAPLANE -> "update_responses" { - + mgu_update_dataplane - - mgu_rollback_dataplane - } -} - -#[derive(Debug)] -pub struct SagaMulticastGroupDpdUpdate; -impl NexusSaga for SagaMulticastGroupDpdUpdate { - const NAME: &'static str = "multicast-group-dpd-update"; - type Params = Params; - - fn register_actions(registry: &mut ActionRegistry) { - multicast_group_dpd_update_register_actions(registry); - } - - fn make_saga_dag( - _params: &Self::Params, - mut builder: DagBuilder, - ) -> Result { - builder.append(Node::action( - "group_data", - "FetchGroupData", - FETCH_GROUP_DATA.as_ref(), - )); - - builder.append(Node::action( - "update_responses", - "UpdateDataplane", - UPDATE_DATAPLANE.as_ref(), - )); - - Ok(builder.build()?) - } -} - -/// Fetch multicast group data from database. -async fn mgu_fetch_group_data( - sagactx: NexusActionContext, -) -> Result<(MulticastGroup, UnderlayMulticastGroup), ActionError> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - debug!( - osagactx.log(), - "fetching multicast group data for DPD-update"; - "external_group_id" => %params.external_group_id, - "underlay_group_id" => %params.underlay_group_id - ); - - // Fetch external multicast group - let external_group = osagactx - .datastore() - .multicast_group_fetch( - &opctx, - MulticastGroupUuid::from_untyped_uuid(params.external_group_id), - ) - .await - .map_err(ActionError::action_failed)?; - - // Fetch underlay multicast group - let underlay_group = osagactx - .datastore() - .underlay_multicast_group_fetch(&opctx, params.underlay_group_id) - .await - .map_err(ActionError::action_failed)?; - - debug!( - osagactx.log(), - "successfully fetched multicast group data for DPD-update"; - "external_group_id" => %external_group.id(), - 
"external_group_name" => external_group.name().as_str(), - "external_ip" => %external_group.multicast_ip, - "underlay_group_id" => %underlay_group.id, - "underlay_ip" => %underlay_group.multicast_ip, - "sources" => ?external_group.source_ips - ); - - Ok((external_group, underlay_group)) -} - -/// Update external and underlay groups in dataplane atomically. -async fn mgu_update_dataplane( - sagactx: NexusActionContext, -) -> Result { - let osagactx = sagactx.user_data(); - let (external_group, underlay_group) = sagactx - .lookup::<(MulticastGroup, UnderlayMulticastGroup)>("group_data")?; - - // Use MulticastDataplaneClient for consistent DPD operations - let dataplane = MulticastDataplaneClient::new( - osagactx.nexus().resolver().clone(), - osagactx.log().clone(), - ) - .await - .map_err(ActionError::action_failed)?; - - debug!( - osagactx.log(), - "updating multicast group in DPD across switches (idempotent)"; - "switch_count" => %dataplane.switch_count(), - "external_group_id" => %external_group.id(), - "external_group_name" => external_group.name().as_str(), - "external_ip" => %external_group.multicast_ip, - "underlay_ip" => %underlay_group.multicast_ip, - "sources" => ?external_group.source_ips, - ); - - let (underlay_response, external_response) = dataplane - .update_groups(GroupUpdateParams { - external_group: &external_group, - underlay_group: &underlay_group, - new_name: external_group.name().as_str(), - new_sources: &external_group.source_ips, - }) - .await - .map_err(ActionError::action_failed)?; - - info!( - osagactx.log(), - "successfully updated multicast groups in DPD across switches"; - "external_group_id" => %external_group.id(), - "underlay_group_id" => %underlay_group.id, - "group_name" => external_group.name().as_str() - ); - - Ok(DataplaneUpdateResponse { - underlay: underlay_response, - external: external_response, - }) -} - -/// Roll back multicast group updates by removing groups from DPD. 
-async fn mgu_rollback_dataplane( - sagactx: NexusActionContext, -) -> Result<(), anyhow::Error> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - - let (external_group, _) = sagactx - .lookup::<(MulticastGroup, UnderlayMulticastGroup)>("group_data")?; - - let multicast_tag = external_group.name().to_string(); - - let dataplane = MulticastDataplaneClient::new( - osagactx.nexus().resolver().clone(), - osagactx.log().clone(), - ) - .await - .map_err(ActionError::action_failed)?; - - debug!( - osagactx.log(), - "rolling back multicast additions"; - "external_group_id" => %params.external_group_id, - "underlay_group_id" => %params.underlay_group_id, - "tag" => %multicast_tag, - "external_group_name" => external_group.name().as_str(), - ); - - dataplane - .remove_groups(&multicast_tag) - .await - .context("failed to cleanup multicast groups during saga rollback")?; - - debug!( - osagactx.log(), - "completed rollback of multicast configuration"; - "tag" => %multicast_tag - ); - - Ok(()) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::app::saga::create_saga_dag; - use crate::app::sagas::test_helpers; - use nexus_db_queries::authn::saga::Serialized; - use nexus_test_utils_macros::nexus_test; - - type ControlPlaneTestContext = - nexus_test_utils::ControlPlaneTestContext; - - fn new_test_params(opctx: &nexus_db_queries::context::OpContext) -> Params { - Params { - serialized_authn: Serialized::for_opctx(opctx), - external_group_id: Uuid::new_v4(), - underlay_group_id: Uuid::new_v4(), - } - } - - #[nexus_test(server = crate::Server)] - async fn test_saga_dag_structure(cptestctx: &ControlPlaneTestContext) { - let opctx = test_helpers::test_opctx(cptestctx); - let params = new_test_params(&opctx); - let dag = - create_saga_dag::(params).unwrap(); - - // Verify the DAG has the expected structure - let nodes: Vec<_> = dag.get_nodes().collect(); - assert!(nodes.len() >= 2); // Should have at least our 2 main actions - - // Verify 
expected node labels exist - let node_labels: std::collections::HashSet<_> = - nodes.iter().map(|node| node.label()).collect(); - - assert!(node_labels.contains("FetchGroupData")); - assert!(node_labels.contains("UpdateDataplane")); - } -} diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 2c08c457fb0..d16604e7ccf 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -8,8 +8,8 @@ use super::{ console_api, params, views::{ self, Certificate, FloatingIp, Group, IdentityProvider, Image, IpPool, - IpPoolRange, MulticastGroup, PhysicalDisk, Project, Rack, Silo, - SiloQuotas, SiloUtilization, Sled, Snapshot, SshKey, User, UserBuiltin, + IpPoolRange, PhysicalDisk, Project, Rack, Silo, SiloQuotas, + SiloUtilization, Sled, Snapshot, SshKey, User, UserBuiltin, Utilization, Vpc, VpcRouter, VpcSubnet, }, }; @@ -2390,19 +2390,34 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - async fn multicast_group_create( + async fn v2025120300_multicast_group_create( rqctx: RequestContext, - group_params: TypedBody, + new_group: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let create_params = group_params.into_inner(); + let nexus = &apictx.context.nexus; + let v1_params = new_group.into_inner(); + + // v1 (v2025120300) allowed explicit pool selection, but the new + // implicit lifecycle auto-selects SSM/ASM pool based on `multicast_ip`. + // If pool is provided without `multicast_ip`, return an error. 
+ if v1_params.pool.is_some() && v1_params.multicast_ip.is_none() { + return Err(HttpError::for_bad_request( + None, + "explicit pool selection requires multicast_ip; \ + pool is auto-selected based on IP address range" + .to_string(), + )); + } + // Convert v1 params to internal params + let internal_params: nexus_types::multicast::MulticastGroupCreate = + v1_params.into(); let group = - nexus.multicast_group_create(&opctx, &create_params).await?; + nexus.multicast_group_create(&opctx, &internal_params).await?; Ok(HttpResponseCreated(views::MulticastGroup::try_from(group)?)) }; apictx @@ -2412,23 +2427,24 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - async fn multicast_group_view( + async fn v2025120300_multicast_group_view( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { + let path: params::MulticastGroupPath = + path_params.into_inner().into(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); let group_selector = params::MulticastGroupSelector { - multicast_group: path.multicast_group.clone(), + multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - let group = - nexus.multicast_group_fetch(&opctx, &group_lookup).await?; + let group = apictx + .context + .nexus + .multicast_group_view(&opctx, &group_selector) + .await?; Ok(HttpResponseOk(views::MulticastGroup::try_from(group)?)) }; apictx @@ -2438,29 +2454,22 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - async fn multicast_group_update( + async fn multicast_group_view( rqctx: RequestContext, path_params: Path, - updated_group: TypedBody, - ) -> Result, HttpError> { + ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.context.nexus; let path = path_params.into_inner(); - let 
updated_group_params = updated_group.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let group_selector = params::MulticastGroupSelector { - multicast_group: path.multicast_group.clone(), + multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - let group = nexus - .multicast_group_update( - &opctx, - &group_lookup, - &updated_group_params, - ) + let group = apictx + .context + .nexus + .multicast_group_view(&opctx, &group_selector) .await?; Ok(HttpResponseOk(views::MulticastGroup::try_from(group)?)) }; @@ -2471,50 +2480,84 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - async fn multicast_group_delete( - rqctx: RequestContext, - path_params: Path, + async fn v2025120300_multicast_group_update( + _rqctx: RequestContext, + _path_params: Path, + _update_params: TypedBody, + ) -> Result, HttpError> { + // Multicast group update is deprecated in the implicit lifecycle model. + // Groups are now created implicitly when members join and deleted when + // all members leave. Properties like `source_ips` should be set when + // adding SSM members. 
+ Err(HttpError::for_bad_request( + None, + "multicast group update is deprecated; groups are managed \ + implicitly through member operations" + .to_string(), + )) + } + + async fn v2025120300_multicast_group_delete( + _rqctx: RequestContext, + _path_params: Path, ) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = - crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let group_selector = params::MulticastGroupSelector { - multicast_group: path.multicast_group.clone(), - }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - nexus.multicast_group_delete(&opctx, &group_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await + // Multicast group deletion is deprecated in the implicit lifecycle + // model. Groups are automatically deleted when all members leave. + // To remove a group, remove all its members. 
+ Err(HttpError::for_bad_request( + None, + "multicast group deletion is deprecated; groups are \ + automatically deleted when all members leave" + .to_string(), + )) } - async fn lookup_multicast_group_by_ip( + // Multicast Group Member Management + + async fn v2025120300_multicast_group_member_list( rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { + path_params: Path, + query_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + > { let apictx = rqctx.context(); let handler = async { + let path: params::MulticastGroupPath = + path_params.into_inner().into(); + let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - - let ip_addr = path.address; - - // System endpoint requires fleet-level read authorization - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - - let group = - nexus.multicast_group_lookup_by_ip(&opctx, ip_addr).await?; - Ok(HttpResponseOk(views::MulticastGroup::try_from(group)?)) + let pag_params = data_page_params_for(&rqctx, &query)?; + let group_selector = params::MulticastGroupSelector { + multicast_group: path.multicast_group, + }; + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + let members = apictx + .context + .nexus + .multicast_group_members_list( + &opctx, + &group_lookup, + &pag_params, + ) + .await?; + let results: Vec = members + .into_iter() + .map(|m| { + views::MulticastGroupMember::try_from(m).map(Into::into) + }) + .collect::, _>>()?; + Ok(HttpResponseOk(ScanById::results_page( + &query, + results, + &|_, m: &v2025120300::MulticastGroupMember| m.id, + )?)) }; apictx .context @@ -2523,8 +2566,6 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - // Multicast Group Member Management - async fn multicast_group_member_list( rqctx: RequestContext, path_params: Path, @@ -2535,36 +2576,36 @@ 
impl NexusExternalApi for NexusExternalApiImpl { > { let apictx = rqctx.context(); let handler = async { - let opctx = - crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; let pag_params = data_page_params_for(&rqctx, &query)?; - let group_selector = params::MulticastGroupSelector { multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - - let members = nexus + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + let members = apictx + .context + .nexus .multicast_group_members_list( &opctx, &group_lookup, &pag_params, ) .await?; - let results = members .into_iter() .map(views::MulticastGroupMember::try_from) .collect::, _>>()?; - Ok(HttpResponseOk(ScanById::results_page( &query, results, - &|_, member: &views::MulticastGroupMember| member.identity.id, + &|_, m: &views::MulticastGroupMember| m.identity.id, )?)) }; apictx @@ -2574,6 +2615,54 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } + async fn v2025120300_multicast_group_member_add( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + member_params: TypedBody, + ) -> Result, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let path: params::MulticastGroupPath = + path_params.into_inner().into(); + let member: params::MulticastGroupMemberAdd = + member_params.into_inner().into(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = apictx.context.nexus.instance_lookup( + &opctx, + params::InstanceSelector { + project: match &member.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: member.instance.clone(), 
+ }, + )?; + let result = apictx + .context + .nexus + .instance_join_multicast_group( + &opctx, + &path.multicast_group, + &instance_lookup, + &member.source_ips, + ) + .await?; + let view = views::MulticastGroupMember::try_from(result)?; + Ok(HttpResponseCreated(v2025120300::MulticastGroupMember::from( + view, + ))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + async fn multicast_group_member_add( rqctx: RequestContext, path_params: Path, @@ -2583,37 +2672,33 @@ impl NexusExternalApi for NexusExternalApiImpl { { let apictx = rqctx.context(); let handler = async { - let opctx = - crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; let path = path_params.into_inner(); + let member = member_params.into_inner(); let query = query_params.into_inner(); - let member_params = member_params.into_inner(); - - let group_selector = params::MulticastGroupSelector { - multicast_group: path.multicast_group, - }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - - let instance_lookup = nexus.instance_lookup( + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = apictx.context.nexus.instance_lookup( &opctx, params::InstanceSelector { - project: query.project, - instance: member_params.instance, + project: match &member.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: member.instance.clone(), }, )?; - - let member = nexus - .multicast_group_member_attach( + let result = apictx + .context + .nexus + .instance_join_multicast_group( &opctx, - &group_lookup, + &path.multicast_group, &instance_lookup, + &member.source_ips, ) .await?; - Ok(HttpResponseCreated(views::MulticastGroupMember::try_from( - member, + result, )?)) }; apictx @@ -2623,41 +2708,92 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } - async fn multicast_group_member_remove( 
+ async fn v2025120300_multicast_group_member_remove( rqctx: RequestContext, - path_params: Path, + path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { + let path: params::MulticastGroupMemberPath = + path_params.into_inner().into(); + let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - + let instance_lookup = apictx.context.nexus.instance_lookup( + &opctx, + params::InstanceSelector { + project: match &path.instance { + NameOrId::Name(_) => query.project, + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), + }, + )?; let group_selector = params::MulticastGroupSelector { multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + apictx + .context + .nexus + .instance_leave_multicast_group( + &opctx, + &group_lookup, + &instance_lookup, + ) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let instance_lookup = nexus.instance_lookup( + async fn multicast_group_member_remove( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = apictx.context.nexus.instance_lookup( &opctx, params::InstanceSelector { - project: query.project, - instance: path.instance, + project: match &path.instance { + NameOrId::Name(_) => query.project, + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), }, )?; 
- - nexus - .multicast_group_member_detach( + let group_selector = params::MulticastGroupSelector { + multicast_group: path.multicast_group, + }; + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + apictx + .context + .nexus + .instance_leave_multicast_group( &opctx, &group_lookup, &instance_lookup, ) .await?; - Ok(HttpResponseDeleted()) }; apictx @@ -2667,6 +2803,34 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } + async fn v2025120300_lookup_multicast_group_by_ip( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + // Use the IP address to look up the group by converting to + // MulticastGroupIdentifier + let group_selector = params::MulticastGroupSelector { + multicast_group: params::MulticastGroupIdentifier::Ip( + path.address, + ), + }; + let group = + nexus.multicast_group_view(&opctx, &group_selector).await?; + Ok(HttpResponseOk(views::MulticastGroup::try_from(group)?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + // Disks async fn disk_list( @@ -2964,6 +3128,8 @@ impl NexusExternalApi for NexusExternalApiImpl { new_instance: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); + let project_selector = query_params.into_inner(); + let new_instance_params = new_instance.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -2971,8 +3137,6 @@ impl NexusExternalApi for NexusExternalApiImpl { let audit = nexus.audit_log_entry_init(&opctx, &rqctx).await?; let result = async { - let project_selector = query_params.into_inner(); - let new_instance_params = &new_instance.into_inner(); let project_lookup = nexus.project_lookup(&opctx, 
project_selector)?; let instance = nexus @@ -3073,13 +3237,13 @@ impl NexusExternalApi for NexusExternalApiImpl { rqctx: RequestContext, query_params: Query, path_params: Path, - reconfigure_params: TypedBody, + instance_config: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); let query = query_params.into_inner(); - let reconfigure_params = reconfigure_params.into_inner(); + let path = path_params.into_inner(); + let instance_config = instance_config.into_inner(); let instance_selector = params::InstanceSelector { project: query.project, instance: path.instance, @@ -3093,7 +3257,7 @@ impl NexusExternalApi for NexusExternalApiImpl { .instance_reconfigure( &opctx, &instance_lookup, - &reconfigure_params, + &instance_config, ) .await?; Ok(HttpResponseOk(instance.into())) @@ -5744,8 +5908,14 @@ impl NexusExternalApi for NexusExternalApiImpl { let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); + + // Note: When instance is specified by UUID, project should be `None` + // (UUIDs are globally unique). Project is only needed for name-based lookup. 
let instance_selector = params::InstanceSelector { - project: query.project, + project: match &path.instance { + NameOrId::Name(_) => query.project, + NameOrId::Id(_) => None, + }, instance: path.instance, }; let instance_lookup = @@ -5765,44 +5935,140 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } + async fn v2025120300_instance_multicast_group_join( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let path: params::InstanceMulticastGroupPath = + path_params.into_inner().into(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: match &path.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), + }; + let instance_lookup = apictx + .context + .nexus + .instance_lookup(&opctx, instance_selector)?; + // v1 (v2025120300) didn't have a body parameter, default to no `source_ips` + let result = apictx + .context + .nexus + .instance_join_multicast_group( + &opctx, + &path.multicast_group, + &instance_lookup, + &None, + ) + .await?; + let view = views::MulticastGroupMember::try_from(result)?; + Ok(HttpResponseCreated(v2025120300::MulticastGroupMember::from( + view, + ))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + async fn instance_multicast_group_join( rqctx: RequestContext, path_params: Path, query_params: Query, + body_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let opctx = - crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - + let body = body_params.into_inner(); + let opctx = + 
crate::context::op_context_for_external_api(&rqctx).await?; let instance_selector = params::InstanceSelector { - project: query.project.clone(), - instance: path.instance, + project: match &path.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; + let instance_lookup = apictx + .context + .nexus + .instance_lookup(&opctx, instance_selector)?; + let result = apictx + .context + .nexus + .instance_join_multicast_group( + &opctx, + &path.multicast_group, + &instance_lookup, + &body.source_ips, + ) + .await?; + Ok(HttpResponseCreated(views::MulticastGroupMember::try_from( + result, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + async fn v2025120300_instance_multicast_group_leave( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let path: params::InstanceMulticastGroupPath = + path_params.into_inner().into(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: match &path.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), + }; + let instance_lookup = apictx + .context + .nexus + .instance_lookup(&opctx, instance_selector)?; let group_selector = params::MulticastGroupSelector { multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - - let member = nexus - .multicast_group_member_attach( + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + apictx + .context + .nexus + .instance_leave_multicast_group( &opctx, &group_lookup, 
&instance_lookup, ) .await?; - - Ok(HttpResponseCreated(views::MulticastGroupMember::try_from( - member, - )?)) + Ok(HttpResponseDeleted()) }; apictx .context @@ -5818,27 +6084,33 @@ impl NexusExternalApi for NexusExternalApiImpl { ) -> Result { let apictx = rqctx.context(); let handler = async { - let opctx = - crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; let instance_selector = params::InstanceSelector { - project: query.project.clone(), - instance: path.instance, + project: match &path.instance { + NameOrId::Name(_) => query.project.clone(), + NameOrId::Id(_) => None, + }, + instance: path.instance.clone(), }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - + let instance_lookup = apictx + .context + .nexus + .instance_lookup(&opctx, instance_selector)?; let group_selector = params::MulticastGroupSelector { multicast_group: path.multicast_group, }; - let group_lookup = - nexus.multicast_group_lookup(&opctx, &group_selector)?; - - nexus - .multicast_group_member_detach( + let group_lookup = apictx + .context + .nexus + .multicast_group_lookup(&opctx, &group_selector) + .await?; + apictx + .context + .nexus + .instance_leave_multicast_group( &opctx, &group_lookup, &instance_lookup, diff --git a/nexus/test-utils/src/nexus_test.rs b/nexus/test-utils/src/nexus_test.rs index fc65da7b362..625d90cfdc6 100644 --- a/nexus/test-utils/src/nexus_test.rs +++ b/nexus/test-utils/src/nexus_test.rs @@ -227,6 +227,65 @@ impl ControlPlaneTestContext { } } + /// Restart a Dendrite instance for testing drift correction scenarios. + /// + /// Simulates a switch restart where DPD loses its programmed state. + /// Restarts on the same port so test DNS stays valid. 
+ pub async fn restart_dendrite( + &self, + switch_location: omicron_common::api::external::SwitchLocation, + ) { + let mut old = self + .dendrite + .write() + .unwrap() + .remove(&switch_location) + .expect("Dendrite should be running"); + let port = old.port; + old.cleanup().await.unwrap(); + + let mgs = self.gateway.get(&switch_location).unwrap(); + let mgs_addr = std::net::SocketAddrV6::new( + std::net::Ipv6Addr::LOCALHOST, + mgs.port, + 0, + 0, + ) + .into(); + + let dendrite = + omicron_test_utils::dev::dendrite::DendriteInstance::start( + port, + Some(self.internal_client.bind_address), + Some(mgs_addr), + ) + .await + .unwrap(); + + // Wait for Dendrite to be ready before returning. + // We check `switch_identifiers()` rather than just `dpd_uptime()` + // because Nexus needs switch_identifiers to work to determine which + // switch to program. + let dpd_client = dpd_client::Client::new( + &format!("http://[::1]:{port}"), + dpd_client::ClientState { + tag: String::from("test-restart-wait"), + log: self.logctx.log.clone(), + }, + ); + loop { + match dpd_client.switch_identifiers().await { + Ok(_) => break, + Err(_) => { + tokio::time::sleep(std::time::Duration::from_millis(50)) + .await; + } + } + } + + self.dendrite.write().unwrap().insert(switch_location, dendrite); + } + pub async fn teardown(mut self) { self.server.close().await; self.database.cleanup().await.unwrap(); diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index e80ad7b3fdb..b8b9fad9a9d 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -706,7 +706,7 @@ pub async fn create_instance( Default::default(), None, // Multicast groups= - Vec::::new(), + Vec::::new(), ) .await } @@ -724,7 +724,7 @@ pub async fn create_instance_with( start: bool, auto_restart_policy: Option, cpu_platform: Option, - multicast_groups: Vec, + multicast_groups: Vec, ) -> Instance { let url = 
format!("/v1/instances?project={}", project_name); diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 92a09964937..9c94eb2113d 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -789,35 +789,15 @@ pub static DEMO_INSTANCE_MULTICAST_GROUP_JOIN_URL: LazyLock = *DEMO_INSTANCE_NAME, *DEMO_MULTICAST_GROUP_NAME, *DEMO_PROJECT_NAME ) }); -pub static DEMO_MULTICAST_GROUP_BY_IP_URL: LazyLock = - LazyLock::new(|| { - "/v1/system/multicast-groups/by-ip/224.0.1.100".to_string() - }); -pub static DEMO_MULTICAST_GROUP_CREATE: LazyLock = - LazyLock::new(|| params::MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_MULTICAST_GROUP_NAME.clone(), - description: String::from("demo multicast group"), - }, - multicast_ip: Some("224.0.1.100".parse().unwrap()), - pool: Some(DEMO_MULTICAST_IP_POOL_NAME.clone().into()), - source_ips: Some(Vec::new()), - mvlan: None, - }); -pub static DEMO_MULTICAST_GROUP_UPDATE: LazyLock = - LazyLock::new(|| params::MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("updated description".to_string()), - }, - source_ips: Some(Vec::new()), - mvlan: None, - }); pub static DEMO_MULTICAST_MEMBER_ADD: LazyLock< params::MulticastGroupMemberAdd, > = LazyLock::new(|| params::MulticastGroupMemberAdd { instance: DEMO_INSTANCE_NAME.clone().into(), + source_ips: None, }); +pub static DEMO_INSTANCE_MULTICAST_GROUP_JOIN: LazyLock< + params::InstanceMulticastGroupJoin, +> = LazyLock::new(|| params::InstanceMulticastGroupJoin { source_ips: None }); // Switch port settings and status pub const DEMO_SWITCH_PORT_URL: &'static str = @@ -1050,10 +1030,11 @@ pub static DEMO_MULTICAST_IP_POOL_SILOS_URL: LazyLock = LazyLock::new(|| format!("{}/silos", *DEMO_MULTICAST_IP_POOL_URL)); pub static DEMO_MULTICAST_IP_POOL_RANGE: LazyLock = LazyLock::new(|| { + // Use 224.1.0.x to avoid reserved 
addresses in 224.0.1.x (PTP, NTP, etc.) IpRange::V4( Ipv4Range::new( - Ipv4Addr::new(224, 0, 1, 100), - Ipv4Addr::new(224, 0, 1, 200), + Ipv4Addr::new(224, 1, 0, 100), + Ipv4Addr::new(224, 1, 0, 200), ) .unwrap(), ) @@ -3165,31 +3146,21 @@ pub static VERIFY_ENDPOINTS: LazyLock> = LazyLock::new( // Multicast groups - // Multicast groups are fleet-scoped and allow any authenticated user - // (including unprivileged) to create, read, modify, and delete groups - // to enable cross-project and cross-silo multicast communication. + // Multicast groups are fleet-scoped. Any authenticated user in + // their fleet can list/read groups. Member operations require + // Instance::Modify permission on the instance being attached. + // Groups are created/deleted implicitly via member add/remove. VerifyEndpoint { url: &MULTICAST_GROUPS_URL, visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::Full, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_MULTICAST_GROUP_CREATE).unwrap(), - ), - ], + allowed_methods: vec![AllowedMethod::Get], }, VerifyEndpoint { url: &DEMO_MULTICAST_GROUP_URL, visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::Full, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&*DEMO_MULTICAST_GROUP_UPDATE).unwrap(), - ), - AllowedMethod::Delete, - ], + allowed_methods: vec![AllowedMethod::Get], }, // Multicast member endpoints have asymmetric authorization: // - GET operations only check fleet-scoped group Read permission (accessible to all authenticated users) @@ -3232,16 +3203,10 @@ pub static VERIFY_ENDPOINTS: LazyLock> = LazyLock::new( visibility: Visibility::Protected, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![ - AllowedMethod::Put(serde_json::to_value(()).unwrap()), + AllowedMethod::Put(serde_json::to_value(&*DEMO_INSTANCE_MULTICAST_GROUP_JOIN).unwrap()), AllowedMethod::Delete, ], }, - VerifyEndpoint { 
- url: &DEMO_MULTICAST_GROUP_BY_IP_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, // Audit log VerifyEndpoint { url: &AUDIT_LOG_URL, diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index 586764bb2a2..09f37ec0796 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -4,8 +4,6 @@ //! Integration tests for operating on IP Pools -use std::net::Ipv4Addr; - use crate::integration_tests::instances::create_project_and_pool; use crate::integration_tests::instances::instance_wait_for_state; use dropshot::HttpErrorResponseBody; @@ -1231,6 +1229,239 @@ async fn test_ip_pool_multicast_range_rejects_v6( assert_eq!(error.message, "IPv6 ranges are not allowed yet"); } +/// Test that multicast pools reject reserved IPv4 multicast address ranges. +/// +/// This test ensures operators receive immediate feedback when configuring +/// IP pools, preventing users from encountering Dendrite errors later when +/// allocating addresses for multicast groups. +/// +/// TODO: Add IPv6 reserved range tests (ff00::/16 reserved-scope, ff01::/16 +/// interface-local, ff02::/16 link-local) once IPv6 multicast support is +/// enabled. The validation code exists and matches Dendrite's validation +/// (see dendrite/dpd/src/mcast/validate.rs). 
+#[nexus_test] +async fn test_ip_pool_multicast_rejects_reserved_ranges( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create a multicast pool + let pool_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "mcast-reserved-test".parse().unwrap(), + description: "Test rejection of reserved multicast ranges" + .to_string(), + }, + IpVersion::V4, + ); + object_create::<_, IpPool>(client, "/v1/system/ip-pools", &pool_params) + .await; + + let add_url = "/v1/system/ip-pools/mcast-reserved-test/ranges/add"; + + // IPv4 link-local multicast (224.0.0.0/24) should be rejected + let link_local_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 0, 10), + std::net::Ipv4Addr::new(224, 0, 0, 20), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &link_local_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!(error.message.contains("link-local multicast")); + + // IPv4 GLOP multicast (233.0.0.0/8) should be rejected + let glop_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(233, 1, 0, 1), + std::net::Ipv4Addr::new(233, 1, 0, 10), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &glop_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!(error.message.contains("GLOP multicast")); + + // IPv4 admin-scoped multicast (239.0.0.0/8) should be rejected + let admin_scoped_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(239, 10, 0, 1), + std::net::Ipv4Addr::new(239, 10, 0, 10), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &admin_scoped_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!(error.message.contains("administratively scoped multicast")); + + // Valid ASM range (225.0.0.0 - 231.255.255.255) should succeed + let valid_asm_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(225, 1, 0, 1), + std::net::Ipv4Addr::new(225, 1, 0, 10), + 
) + .unwrap(), + ); + object_create::<_, IpPoolRange>(client, add_url, &valid_asm_range).await; + + // Ranges that touch reserved boundaries should be rejected + // Range starting in valid space but ending in link-local + let boundary_range_low = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 0, 0), + std::net::Ipv4Addr::new(224, 0, 1, 0), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &boundary_range_low, + StatusCode::BAD_REQUEST, + ) + .await; + assert!(error.message.contains("link-local multicast")); + + // Range touching GLOP boundary + let boundary_range_glop = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(233, 0, 0, 1), + std::net::Ipv4Addr::new(233, 255, 255, 255), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &boundary_range_glop, + StatusCode::BAD_REQUEST, + ) + .await; + assert!(error.message.contains("GLOP multicast")); +} + +/// Test that multicast pools reject specific reserved IPv4 multicast addresses. +/// +/// These addresses (NTP, Cisco Auto-RP, PTP) are individually reserved and +/// cannot be used even though they fall within otherwise valid ranges. +/// This validation aligns with Dendrite's (i.e. DPD's) specific address +/// rejection. 
+#[nexus_test] +async fn test_ip_pool_multicast_rejects_specific_reserved_addresses( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create a multicast pool + let pool_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "mcast-specific-reserved-test".parse().unwrap(), + description: + "Test rejection of specific reserved multicast addresses" + .to_string(), + }, + IpVersion::V4, + ); + object_create::<_, IpPool>(client, "/v1/system/ip-pools", &pool_params) + .await; + + let add_url = "/v1/system/ip-pools/mcast-specific-reserved-test/ranges/add"; + + // NTP (224.0.1.1) should be rejected + let ntp_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 1, 0), + std::net::Ipv4Addr::new(224, 0, 1, 10), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &ntp_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("224.0.1.1"), + "Expected error about NTP address 224.0.1.1, got: {}", + error.message + ); + + // Cisco Auto-RP-Announce (224.0.1.39) should be rejected + let cisco_announce_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 1, 35), + std::net::Ipv4Addr::new(224, 0, 1, 45), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &cisco_announce_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("224.0.1.39") + || error.message.contains("224.0.1.40"), + "Expected error about Cisco Auto-RP address, got: {}", + error.message + ); + + // PTP-primary (224.0.1.129) should be rejected + let ptp_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 1, 125), + std::net::Ipv4Addr::new(224, 0, 1, 135), + ) + .unwrap(), + ); + let error = object_create_error( + client, + add_url, + &ptp_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("224.0.1.129") + || error.message.contains("224.0.1.130") + || 
error.message.contains("224.0.1.131") + || error.message.contains("224.0.1.132"), + "Expected error about PTP address, got: {}", + error.message + ); + + // Range that avoids all specific reserved addresses should succeed + // 224.0.1.50 - 224.0.1.100 is safe (between Cisco Auto-RP and PTP) + let valid_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 0, 1, 50), + std::net::Ipv4Addr::new(224, 0, 1, 100), + ) + .unwrap(), + ); + object_create::<_, IpPoolRange>(client, add_url, &valid_range).await; +} + #[nexus_test] async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; @@ -1338,8 +1569,11 @@ async fn test_ip_pool_list_in_silo(cptestctx: &ControlPlaneTestContext) { // create other pool and link to silo let other_pool_range = IpRange::V4( - Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) - .unwrap(), + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 1, 0, 1), + std::net::Ipv4Addr::new(10, 1, 0, 5), + ) + .unwrap(), ); let other_name = "other-pool"; create_ip_pool(&client, other_name, Some(other_pool_range)).await; @@ -1347,8 +1581,11 @@ async fn test_ip_pool_list_in_silo(cptestctx: &ControlPlaneTestContext) { // create third pool and don't link to silo let unlinked_pool_range = IpRange::V4( - Ipv4Range::new(Ipv4Addr::new(10, 2, 0, 1), Ipv4Addr::new(10, 2, 0, 5)) - .unwrap(), + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 2, 0, 1), + std::net::Ipv4Addr::new(10, 2, 0, 5), + ) + .unwrap(), ); let unlinked_name = "unlinked-pool"; create_ip_pool(&client, unlinked_name, Some(unlinked_pool_range)).await; diff --git a/nexus/tests/integration_tests/multicast/api.rs b/nexus/tests/integration_tests/multicast/api.rs index bcda0eafe3a..7b30ebfbd47 100644 --- a/nexus/tests/integration_tests/multicast/api.rs +++ b/nexus/tests/integration_tests/multicast/api.rs @@ -6,23 +6,31 @@ //! Tests for multicast API behavior and functionality. //! -//! 
This module tests various aspects of multicast group membership APIs, including: +//! This module tests multicast group membership APIs including: //! -//! - Stopped instance handling -//! - Idempotency behavior -//! - API consistency +//! - Stopped instance handling: Members in "Left" state, reconciler transitions +//! - Idempotency: Duplicate join operations succeed without creating duplicates +//! - UUID-based access: Fleet-scoped operations without project parameter +//! - Join-by-IP: Implicit group creation when joining by multicast IP +//! - ASM (Any-Source): 224.0.0.0/4 except 232.0.0.0/8, no source filtering +//! - SSM (Source-Specific): 232.0.0.0/8, requires source IPs +//! - Source IP validation: Mismatch detection, ASM/SSM compatibility +//! - Pool validation: IP must be in a linked multicast pool + +use std::net::IpAddr; use http::{Method, StatusCode}; + use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ - create_default_ip_pool, create_project, object_create, + create_default_ip_pool, create_instance, create_project, object_create, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ - InstanceCreate, InstanceNetworkInterfaceAttachment, MulticastGroupCreate, - MulticastGroupMemberAdd, + InstanceCreate, InstanceMulticastGroupJoin, + InstanceNetworkInterfaceAttachment, MulticastGroupMemberAdd, }; -use nexus_types::external_api::views::{MulticastGroup, MulticastGroupMember}; +use nexus_types::external_api::views::MulticastGroupMember; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, Instance, InstanceCpuCount, NameOrId, @@ -38,32 +46,17 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { let group_name = "api-edge-cases-group"; // Setup in parallel - let (_, _, mcast_pool) = ops::join3( + let (_, _, _) = ops::join3( create_project(client, project_name), create_default_ip_pool(client), 
create_multicast_ip_pool(client, "api-edge-pool"), ) .await; - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for API edge case testing".to_string(), - }, - multicast_ip: None, // Test with auto-assigned IP - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - // Case: Stopped instances (all APIs should handle stopped instances // identically) - // API Path: Instance created stopped with multicast group + // API Path: Instance created stopped, then added to group let instance1_params = InstanceCreate { identity: IdentityMetadataCreateParams { name: "edge-case-1".parse().unwrap(), @@ -76,7 +69,7 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { ssh_public_keys: None, network_interfaces: InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - multicast_groups: vec![NameOrId::Name(group_name.parse().unwrap())], + multicast_groups: vec![], // No groups at creation disks: vec![], boot_disk: None, start: false, // Create stopped @@ -89,7 +82,24 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { let instance1: Instance = object_create(client, &instance_url, &instance1_params).await; - // API Path: Instance created stopped, then added to group + // Add instance1 to group + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member1_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("edge-case-1".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member1_params, + ) + .await; + wait_for_group_active(client, group_name).await; + + // API 
Path: Second instance created stopped, then added to existing group let instance2_params = InstanceCreate { identity: IdentityMetadataCreateParams { name: "edge-case-2".parse().unwrap(), @@ -113,18 +123,15 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { let instance2: Instance = object_create(client, &instance_url, &instance2_params).await; - // Add to group after creation - let member_add_url = format!( - "{}?project={project_name}", - mcast_group_members_url(group_name) - ); - let member_params = MulticastGroupMemberAdd { + // Add to existing group + let member2_params = MulticastGroupMemberAdd { instance: NameOrId::Name("edge-case-2".parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( client, &member_add_url, - &member_params, + &member2_params, ) .await; @@ -160,6 +167,7 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { // Try to add instance1 again using group member add (should be idempotent) let duplicate_member_params = MulticastGroupMemberAdd { instance: NameOrId::Name("edge-case-1".parse().unwrap()), + source_ips: None, }; // This should succeed idempotently @@ -219,7 +227,7 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { format!("/v1/instances/{instance_uuid}/multicast-groups/{group_uuid}"); let member_uuid: MulticastGroupMember = NexusRequest::new( RequestBuilder::new(client, Method::PUT, &join_url_uuid) - .body(Some(&())) + .body(Some(&InstanceMulticastGroupJoin::default())) .expect_status(Some(StatusCode::CREATED)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -309,7 +317,7 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { format!("/v1/instances/not-a-uuid/multicast-groups/{group_uuid}"); NexusRequest::new( RequestBuilder::new(client, Method::PUT, &invalid_join_url) - .body(Some(&())) + .body(Some(&InstanceMulticastGroupJoin::default())) .expect_status(Some(StatusCode::BAD_REQUEST)), ) 
.authn_as(AuthnMode::PrivilegedUser) @@ -317,7 +325,6 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { .await .expect("Invalid UUID should return 400 Bad Request"); - // Cleanup - instance3 has already left the group above cleanup_instances( cptestctx, client, @@ -325,5 +332,527 @@ async fn test_multicast_api_behavior(cptestctx: &ControlPlaneTestContext) { &["edge-case-1", "edge-case-2", "edge-case-3"], ) .await; - cleanup_multicast_groups(client, &[group_name]).await; + wait_for_group_deleted(client, group_name).await; +} + +/// Test ASM (Any-Source Multicast) join-by-IP: instance joins by specifying +/// a multicast IP directly instead of a group name. The system finds the pool +/// containing the IP and implicitly creates the group with that explicit IP. +#[nexus_test] +async fn test_join_by_ip_asm(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-asm-project"; + let instance_name = "join-by-ip-inst-1"; + + // Setup: project and pools + let (_, _, mcast_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "asm-pool", + (224, 10, 0, 1), + (224, 10, 0, 255), + ), + ) + .await; + + // Create instance + create_instance(client, project_name, instance_name).await; + + // Join by IP - use an IP from the pool range as the "group name" + let explicit_ip = "224.10.0.50"; + let join_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{explicit_ip}?project={project_name}" + ); + let join_body = InstanceMulticastGroupJoin { source_ips: None }; + + let response = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url) + .body(Some(&join_body)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Request should execute"); + + if response.status != StatusCode::CREATED { + panic!( + "Join-by-IP should succeed: expected {}, got {} - body: {}", 
+ StatusCode::CREATED, + response.status, + String::from_utf8_lossy(&response.body) + ); + } + + let member: MulticastGroupMember = + response.parsed_body().expect("Should parse member"); + + // Verify the member has the expected multicast IP + assert_eq!( + member.multicast_ip.to_string(), + explicit_ip, + "Member should have the explicit IP specified in join" + ); + + // Verify the group was implicitly created with the explicit IP + // Group name is auto-generated: "mcast-224-10-0-50" + let expected_group_name = + format!("mcast-{}", explicit_ip.replace('.', "-")); + let group = wait_for_group_active(client, &expected_group_name).await; + + assert_eq!( + group.multicast_ip.to_string(), + explicit_ip, + "Group should have the explicit multicast IP" + ); + assert_eq!( + group.ip_pool_id, mcast_pool.identity.id, + "Group should be in the ASM pool" + ); + assert!(group.source_ips.is_empty(), "ASM group should have no source IPs"); + + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, &expected_group_name).await; +} + +/// Test SSM (Source-Specific Multicast) join-by-IP: instance joins an SSM IP +/// (232.x.x.x) with source IPs specified. The system implicitly creates the +/// group with explicit IP and sources. 
+#[nexus_test] +async fn test_join_by_ip_ssm_with_sources(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-ssm-project"; + let instance_name = "join-by-ip-ssm-inst"; + + // Setup: project and pools + let (_, _, ssm_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "ssm-pool", + (232, 20, 0, 1), + (232, 20, 0, 255), + ), + ) + .await; + + // Create instance + create_instance(client, project_name, instance_name).await; + + // Join by SSM IP with source IPs + let explicit_ssm_ip = "232.20.0.100"; + let source_ip: IpAddr = "10.5.5.5".parse().unwrap(); + let join_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{explicit_ssm_ip}?project={project_name}" + ); + let join_body = + InstanceMulticastGroupJoin { source_ips: Some(vec![source_ip]) }; + + let member: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url) + .body(Some(&join_body)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("SSM join-by-IP should succeed") + .parsed_body() + .expect("Should parse member"); + + assert_eq!( + member.multicast_ip.to_string(), + explicit_ssm_ip, + "Member should have the explicit SSM IP" + ); + + // Verify group was implicitly created with correct properties + let expected_group_name = + format!("mcast-{}", explicit_ssm_ip.replace('.', "-")); + let group = wait_for_group_active(client, &expected_group_name).await; + + assert_eq!( + group.multicast_ip.to_string(), + explicit_ssm_ip, + "Group should have the explicit SSM IP" + ); + assert_eq!( + group.ip_pool_id, ssm_pool.identity.id, + "Group should be in the SSM pool" + ); + assert_eq!(group.source_ips.len(), 1, "SSM group should have 1 source IP"); + assert_eq!( + group.source_ips[0].to_string(), + source_ip.to_string(), + "SSM group should have 
the specified source IP" + ); + + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, &expected_group_name).await; +} + +/// Test SSM join-by-IP without sources should fail. +/// SSM addresses (232.0.0.0/8) require source IPs for implicit creation. +#[nexus_test] +async fn test_join_by_ip_ssm_without_sources_fails( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-ssm-fail-project"; + let instance_name = "join-by-ip-ssm-fail-inst"; + + // Setup + let (_, _, _ssm_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "ssm-fail-pool", + (232, 30, 0, 1), + (232, 30, 0, 255), + ), + ) + .await; + + create_instance(client, project_name, instance_name).await; + + // Try to join SSM IP without sources; should fail + let ssm_ip = "232.30.0.50"; + let join_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{ssm_ip}?project={project_name}" + ); + let join_body = InstanceMulticastGroupJoin { + source_ips: None, // No sources! + }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url) + .body(Some(&join_body)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("SSM without sources should fail"); + + let error_body: dropshot::HttpErrorResponseBody = + error.parsed_body().unwrap(); + assert!( + error_body.message.contains("SSM") + || error_body.message.contains("source"), + "Error should mention SSM or source IPs: {}", + error_body.message + ); + + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; +} + +/// Test join-by-IP with IP not in any pool should fail. 
+#[nexus_test] +async fn test_join_by_ip_not_in_pool_fails( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-nopool-project"; + let instance_name = "join-by-ip-nopool-inst"; + + // Setup: only create a pool with limited range + let (_, _, _) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "limited-pool", + (224, 100, 0, 1), + (224, 100, 0, 10), // Only 10 IPs + ), + ) + .await; + + create_instance(client, project_name, instance_name).await; + + // Try to join with IP outside any pool range + let ip_not_in_pool = "224.200.0.50"; // Not in 224.100.0.1-10 + let join_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{ip_not_in_pool}?project={project_name}" + ); + let join_body = InstanceMulticastGroupJoin { source_ips: None }; + + let error = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url) + .body(Some(&join_body)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("IP not in pool should fail"); + + let error_body: dropshot::HttpErrorResponseBody = + error.parsed_body().unwrap(); + assert!( + error_body.message.contains("pool") + || error_body.message.contains("range"), + "Error should mention pool or range: {}", + error_body.message + ); + + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; +} + +/// Test joining existing group by IP: second instance joins the same IP +/// without specifying sources. 
+#[nexus_test] +async fn test_join_by_ip_existing_group(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-existing-project"; + + // Setup + let (_, _, _) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "existing-pool", + (224, 50, 0, 1), + (224, 50, 0, 255), + ), + ) + .await; + + create_instance(client, project_name, "existing-inst-1").await; + create_instance(client, project_name, "existing-inst-2").await; + + let explicit_ip = "224.50.0.77"; + let expected_group_name = + format!("mcast-{}", explicit_ip.replace('.', "-")); + + // First instance implicitly creates the group by joining with IP + let join_url_1 = format!( + "/v1/instances/existing-inst-1/multicast-groups/{explicit_ip}?project={project_name}" + ); + let member1: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url_1) + .body(Some(&InstanceMulticastGroupJoin::default())) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("First join-by-IP should succeed") + .parsed_body() + .expect("Should parse member"); + + wait_for_group_active(client, &expected_group_name).await; + + // Second instance joins the same IP; should attach to existing group + let join_url_2 = format!( + "/v1/instances/existing-inst-2/multicast-groups/{explicit_ip}?project={project_name}" + ); + let member2: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url_2) + .body(Some(&InstanceMulticastGroupJoin::default())) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Second join-by-IP should succeed") + .parsed_body() + .expect("Should parse member"); + + // Both members should have the same group and IP + assert_eq!(member1.multicast_group_id, 
member2.multicast_group_id); + assert_eq!(member1.multicast_ip, member2.multicast_ip); + assert_eq!(member1.multicast_ip.to_string(), explicit_ip); + + // Verify group has 2 members + let members = + list_multicast_group_members(client, &expected_group_name).await; + assert_eq!(members.len(), 2, "Group should have 2 members"); + + cleanup_instances( + cptestctx, + client, + project_name, + &["existing-inst-1", "existing-inst-2"], + ) + .await; + wait_for_group_deleted(client, &expected_group_name).await; +} + +/// Test source mismatch when joining existing group by IP. +/// If an SSM group exists and a new instance tries to join with different +/// sources, it should fail. +#[nexus_test] +async fn test_join_by_ip_source_mismatch_fails( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-mismatch-project"; + + // Setup with SSM pool + let (_, _, _ssm_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "mismatch-ssm-pool", + (232, 60, 0, 1), + (232, 60, 0, 255), + ), + ) + .await; + + create_instance(client, project_name, "mismatch-inst-1").await; + create_instance(client, project_name, "mismatch-inst-2").await; + + let explicit_ssm_ip = "232.60.0.88"; + let expected_group_name = + format!("mcast-{}", explicit_ssm_ip.replace('.', "-")); + let source1: IpAddr = "10.1.1.1".parse().unwrap(); + let source2: IpAddr = "10.2.2.2".parse().unwrap(); + + // First instance implicitly creates SSM group with source1 + let join_url_1 = format!( + "/v1/instances/mismatch-inst-1/multicast-groups/{explicit_ssm_ip}?project={project_name}" + ); + NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url_1) + .body(Some(&InstanceMulticastGroupJoin { + source_ips: Some(vec![source1]), + })) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("First 
SSM join should succeed") + .parsed_body::() + .expect("Should parse member"); + + wait_for_group_active(client, &expected_group_name).await; + + // Second instance tries to join with different source; should fail + let join_url_2 = format!( + "/v1/instances/mismatch-inst-2/multicast-groups/{explicit_ssm_ip}?project={project_name}" + ); + let error = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url_2) + .body(Some(&InstanceMulticastGroupJoin { + source_ips: Some(vec![source2]), + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Mismatched sources should fail"); + + let error_body: dropshot::HttpErrorResponseBody = + error.parsed_body().unwrap(); + assert!( + error_body.message.contains("source"), + "Error should mention source IPs: {}", + error_body.message + ); + + cleanup_instances( + cptestctx, + client, + project_name, + &["mismatch-inst-1", "mismatch-inst-2"], + ) + .await; + wait_for_group_deleted(client, &expected_group_name).await; +} + +/// Test that joining an existing ASM group with sources specified fails. +/// ASM groups have no source filtering, so specifying sources is invalid. 
+#[nexus_test] +async fn test_join_by_ip_asm_with_sources_fails( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "join-by-ip-asm-sources-project"; + + // Setup: project and pools + let (_, _, _) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "asm-sources-pool", + (224, 70, 0, 1), + (224, 70, 0, 255), + ), + ) + .await; + + // Create two instances + let (instance1, instance2) = ops::join2( + create_instance(client, project_name, "asm-sources-inst-1"), + create_instance(client, project_name, "asm-sources-inst-2"), + ) + .await; + + // First instance joins ASM IP without sources (valid for ASM) + let explicit_ip: IpAddr = "224.70.0.55".parse().unwrap(); + let join_url = format!( + "/v1/instances/{}/multicast-groups/{explicit_ip}?project={project_name}", + instance1.identity.name + ); + + NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url) + .body(Some(&InstanceMulticastGroupJoin::default())) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("First instance ASM join should succeed"); + + let expected_group_name = + format!("mcast-{}", explicit_ip.to_string().replace('.', "-")); + wait_for_group_active(client, &expected_group_name).await; + + // Second instance tries to join the same ASM group WITH sources + // ASM groups don't support source filtering + let join_url2 = format!( + "/v1/instances/{}/multicast-groups/{explicit_ip}?project={project_name}", + instance2.identity.name + ); + let bogus_source: IpAddr = "10.99.99.99".parse().unwrap(); + + let error_response = NexusRequest::new( + RequestBuilder::new(client, Method::PUT, &join_url2) + .body(Some(&InstanceMulticastGroupJoin { + source_ips: Some(vec![bogus_source]), + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() 
+ .await + .expect("ASM join with sources should fail"); + + let error_body: dropshot::HttpErrorResponseBody = + error_response.parsed_body().unwrap(); + assert!( + error_body.message.to_lowercase().contains("source"), + "Error should mention source IPs, got: {}", + error_body.message + ); + + cleanup_instances( + cptestctx, + client, + project_name, + &["asm-sources-inst-1", "asm-sources-inst-2"], + ) + .await; + wait_for_group_deleted(client, &expected_group_name).await; } diff --git a/nexus/tests/integration_tests/multicast/authorization.rs b/nexus/tests/integration_tests/multicast/authorization.rs index 5247e4fe2a6..89ee1ced6d2 100644 --- a/nexus/tests/integration_tests/multicast/authorization.rs +++ b/nexus/tests/integration_tests/multicast/authorization.rs @@ -2,26 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Authorization tests for fleet-scoped multicast groups. +//! Authorization tests for multicast groups. //! -//! Multicast groups are fleet-scoped resources with explicit permissions granted -//! to any authenticated user in the fleet (defined in nexus/auth/src/authz/omicron.polar). +//! Groups are fleet-scoped. Any authenticated user can list and read them. +//! Member operations require modify permission on the instance being added. //! -//! **Authorization model (intentionally deviates from standard Oxide IAM):** -//! - **Read/List**: Any authenticated user can read and list multicast groups in their fleet -//! (no Fleet::Viewer role required) -//! - **Create**: Any authenticated user can create multicast groups in their fleet -//! (no Fleet::Admin role required) -//! - **Modify/Delete**: Any authenticated user can modify and delete multicast groups in their fleet -//! (no Fleet::Admin role required) -//! - **Member operations**: Users can add/remove instances they own (requires instance permissions) -//! -//! 
This enables cross-project and cross-silo multicast communication. Users -//! with ONLY project-level roles (e.g., Project::Collaborator) and NO -//! silo-level roles can still access multicast groups, because the only -//! requirement is being an authenticated user in a silo within the fleet. - -use std::net::{IpAddr, Ipv4Addr}; +//! Pool linking controls access: a silo can only use pools linked to it. +//! Cross-silo multicast works by linking the same pool to multiple silos. use http::StatusCode; @@ -29,148 +16,126 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::test_params::UserPassword; use nexus_test_utils::resource_helpers::{ create_default_ip_pool, create_instance, create_local_user, create_project, - grant_iam, link_ip_pool, object_get, + grant_iam, link_ip_pool, object_create, object_get, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ - InstanceCreate, InstanceNetworkInterfaceAttachment, MulticastGroupCreate, - MulticastGroupMemberAdd, MulticastGroupUpdate, ProjectCreate, + InstanceCreate, InstanceNetworkInterfaceAttachment, + MulticastGroupMemberAdd, ProjectCreate, SiloCreate, SiloQuotasCreate, +}; +use nexus_types::external_api::shared::{ + ProjectRole, SiloIdentityMode, SiloRole, }; -use nexus_types::external_api::shared::{ProjectRole, SiloRole}; use nexus_types::external_api::views::{ MulticastGroup, MulticastGroupMember, Silo, }; use omicron_common::api::external::{ - ByteCount, Hostname, IdentityMetadataCreateParams, - IdentityMetadataUpdateParams, Instance, InstanceCpuCount, NameOrId, + ByteCount, Hostname, IdentityMetadataCreateParams, Instance, + InstanceCpuCount, NameOrId, }; -use omicron_common::vlan::VlanID; +use omicron_uuid_kinds::SiloUserUuid; use super::*; -/// Test that silo users can create and modify multicast groups in their fleet. 
-/// This verifies the authorization model where any authenticated silo user -/// can manage multicast groups. -#[nexus_test] -async fn test_silo_users_can_create_and_modify_multicast_groups( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - create_default_ip_pool(&client).await; - - // Get current silo info - let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); - let silo: Silo = object_get(client, &silo_url).await; - - // Create multicast IP pool (as fleet admin) - create_multicast_ip_pool(&client, "mcast-pool").await; - link_ip_pool(&client, "mcast-pool", &silo.identity.id, false).await; - - // Create a regular silo user (collaborator) - let user = create_local_user( - client, - &silo, - &"test-user".parse().unwrap(), - UserPassword::LoginDisallowed, - ) - .await; - - // Grant collaborator role to the user - grant_iam( - client, - &silo_url, - SiloRole::Collaborator, - user.id, - AuthnMode::PrivilegedUser, - ) - .await; - - // Create multicast group as the silo user - should SUCCEED - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 101)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { +/// Create a multicast group via the member-add implicitly create pattern. +/// +/// This creates a project and instance for the user, then adds the instance +/// as a member to the specified group name. Since there's no explicit create +/// endpoint, adding the first member implicitly creates the group. +/// +/// Returns the implicitly created multicast group. 
+async fn create_group_via_member_add( + client: &dropshot::test_util::ClientTestContext, + user_id: SiloUserUuid, + group_name: &str, + _pool_name: &str, +) -> MulticastGroup { + // Use unique project/instance names based on group name to avoid conflicts + let project_name = format!("{group_name}-project"); + let instance_name = format!("{group_name}-instance"); + + // Case: Create a project as the user + let project_params = ProjectCreate { identity: IdentityMetadataCreateParams { - name: "user-group".parse().unwrap(), - description: "Group created by silo user".to_string(), + name: project_name.parse().unwrap(), + description: format!("Project for {group_name}"), }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, }; - // Silo user can create multicast group - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) + NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, "/v1/projects") + .body(Some(&project_params)) .expect_status(Some(StatusCode::CREATED)), ) - .authn_as(AuthnMode::SiloUser(user.id)) + .authn_as(AuthnMode::SiloUser(user_id)) .execute() .await - .unwrap() - .parsed_body() - .unwrap(); + .expect("User should be able to create project"); - assert_eq!(group.identity.name.as_str(), "user-group"); - assert_eq!(group.multicast_ip, multicast_ip); - - // Wait for group to become active before updating - wait_for_group_active(client, "user-group").await; - - // Silo user can also modify the multicast group they created - let update_url = mcast_group_url(&group.identity.name.to_string()); - let update_params = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some(group.identity.name.clone()), - description: Some("Updated description by silo user".to_string()), + // Case: Create an instance as the user (stopped) + let instance_params = InstanceCreate { + 
identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("Instance for {group_name}"), }, - source_ips: None, - mvlan: None, + ncpus: InstanceCpuCount::try_from(2).unwrap(), + memory: ByteCount::from_gibibytes_u32(1), + hostname: instance_name.parse().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + multicast_groups: vec![], + disks: vec![], + boot_disk: None, + cpu_platform: None, + start: false, + auto_restart_policy: Default::default(), + anti_affinity_groups: Vec::new(), }; - let updated_group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::PUT, &update_url) - .body(Some(&update_params)) - .expect_status(Some(StatusCode::OK)), + let instance_url = format!("/v1/instances?project={project_name}"); + NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, &instance_url) + .body(Some(&instance_params)) + .expect_status(Some(StatusCode::CREATED)), ) - .authn_as(AuthnMode::SiloUser(user.id)) + .authn_as(AuthnMode::SiloUser(user_id)) .execute() .await - .unwrap() - .parsed_body() - .unwrap(); + .expect("User should be able to create instance"); - assert_eq!( - updated_group.identity.description, - "Updated description by silo user" + // Case: Add the instance as a member (implicitly creates the group) + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" ); - - // Fleet admin can also create multicast groups - let admin_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "admin-group".parse().unwrap(), - description: "Group created by fleet admin".to_string(), - }, - multicast_ip: Some(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 102))), + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: 
Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, }; - let admin_group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&admin_params)) + NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, &member_add_url) + .body(Some(&member_params)) .expect_status(Some(StatusCode::CREATED)), ) - .authn_as(AuthnMode::PrivilegedUser) + .authn_as(AuthnMode::SiloUser(user_id)) .execute() .await - .unwrap() - .parsed_body() - .unwrap(); + .expect("User should be able to add member (implicitly creates group)"); - assert_eq!(admin_group.identity.name.as_str(), "admin-group"); + // Case: Fetch and return the implicitly created group + let group_url = mcast_group_url(group_name); + NexusRequest::new( + RequestBuilder::new(client, http::Method::GET, &group_url) + .expect_status(Some(StatusCode::OK)), + ) + .authn_as(AuthnMode::SiloUser(user_id)) + .execute() + .await + .expect("User should be able to read implicitly created group") + .parsed_body() + .unwrap() } /// Test that silo users can attach their own instances to fleet-scoped @@ -209,16 +174,26 @@ async fn test_silo_users_can_attach_instances_to_multicast_groups( ) .await; - // Create project as the silo user - let project_url = "/v1/projects"; + // User creates group via member-add (implicitly creates the group with first instance) + let group = create_group_via_member_add( + client, + user.id, + "shared-group", + "mcast-pool", + ) + .await; + + wait_for_group_active(client, "shared-group").await; + + // User creates a second instance in a new project to test adding to existing group let project_params = ProjectCreate { identity: IdentityMetadataCreateParams { - name: "user-project".parse().unwrap(), - description: "Project created by silo user".to_string(), + name: "second-project".parse().unwrap(), + description: "Second project for testing".to_string(), }, }; NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, 
project_url) + RequestBuilder::new(client, http::Method::POST, "/v1/projects") .body(Some(&project_params)) .expect_status(Some(StatusCode::CREATED)), ) @@ -227,41 +202,14 @@ async fn test_silo_users_can_attach_instances_to_multicast_groups( .await .unwrap(); - // Fleet admin creates multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 100)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "shared-group".parse().unwrap(), - description: "Fleet-scoped multicast group".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - - // Silo user creates instance in their project - let instance_url = "/v1/instances?project=user-project"; let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { - name: "user-instance".parse().unwrap(), - description: "Instance created by silo user".to_string(), + name: "second-instance".parse().unwrap(), + description: "Second instance for testing".to_string(), }, ncpus: InstanceCpuCount::try_from(1).unwrap(), memory: ByteCount::from_gibibytes_u32(1), - hostname: "user-instance".parse::().unwrap(), + hostname: "second-instance".parse::().unwrap(), user_data: vec![], ssh_public_keys: None, network_interfaces: InstanceNetworkInterfaceAttachment::Default, @@ -276,9 +224,13 @@ async fn test_silo_users_can_attach_instances_to_multicast_groups( }; let instance: Instance = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &instance_url) - .body(Some(&instance_params)) - .expect_status(Some(StatusCode::CREATED)), + 
RequestBuilder::new( + client, + http::Method::POST, + "/v1/instances?project=second-project", + ) + .body(Some(&instance_params)) + .expect_status(Some(StatusCode::CREATED)), ) .authn_as(AuthnMode::SiloUser(user.id)) .execute() @@ -287,14 +239,15 @@ async fn test_silo_users_can_attach_instances_to_multicast_groups( .parsed_body() .unwrap(); - // Silo user can attach their instance to the fleet-scoped multicast group + // User can attach additional instance to existing multicast group let member_params = MulticastGroupMemberAdd { instance: NameOrId::Id(instance.identity.id), + source_ips: None, }; let member_add_url = mcast_group_member_add_url( &group.identity.name.to_string(), &member_params.instance, - "user-project", + "second-project", ); let member: MulticastGroupMember = NexusRequest::new( @@ -328,10 +281,29 @@ async fn test_authenticated_users_can_read_multicast_groups( // Create multicast pool and link to silo create_multicast_ip_pool(&client, "mcast-pool").await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; link_ip_pool(&client, "mcast-pool", &silo.identity.id, false).await; + // Create a collaborator user who can create groups + let creator = create_local_user( + client, + &silo, + &"creator-user".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + creator.id, + AuthnMode::PrivilegedUser, + ) + .await; + // Create a regular silo user with NO special roles (not even viewer) - let user = create_local_user( + let reader = create_local_user( client, &silo, &"regular-user".parse().unwrap(), @@ -339,31 +311,14 @@ async fn test_authenticated_users_can_read_multicast_groups( ) .await; - // Fleet admin creates a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 100)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "readable-group".parse().unwrap(), - 
description: "Group that should be readable by all silo users" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: Some(VlanID::new(100).unwrap()), - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), + // Creator creates a multicast group via member-add + let group = create_group_via_member_add( + client, + creator.id, + "readable-group", + "mcast-pool", ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; // Wait for group to become active wait_for_group_active(client, "readable-group").await; @@ -374,7 +329,7 @@ async fn test_authenticated_users_can_read_multicast_groups( RequestBuilder::new(client, http::Method::GET, &get_group_url) .expect_status(Some(StatusCode::OK)), ) - .authn_as(AuthnMode::SiloUser(user.id)) + .authn_as(AuthnMode::SiloUser(reader.id)) .execute() .await .expect("Silo user should be able to read multicast group") @@ -383,8 +338,6 @@ async fn test_authenticated_users_can_read_multicast_groups( assert_eq!(read_group.identity.id, group.identity.id); assert_eq!(read_group.identity.name, group.identity.name); - assert_eq!(read_group.multicast_ip, multicast_ip); - assert_eq!(read_group.mvlan, Some(VlanID::new(100).unwrap())); // Regular silo user can also LIST multicast groups let list_groups: Vec = NexusRequest::iter_collection_authn( @@ -401,6 +354,24 @@ async fn test_authenticated_users_can_read_multicast_groups( list_groups.iter().any(|g| g.identity.id == group.identity.id), "Multicast group should appear in list for silo user" ); + + // Regular silo user can also lookup group by IP address + // The main multicast-groups endpoint accepts Name, ID, or IP + let multicast_ip = group.multicast_ip; + let ip_lookup_url = 
format!("/v1/multicast-groups/{multicast_ip}"); + let ip_lookup_group: MulticastGroup = NexusRequest::new( + RequestBuilder::new(client, http::Method::GET, &ip_lookup_url) + .expect_status(Some(StatusCode::OK)), + ) + .authn_as(AuthnMode::SiloUser(reader.id)) + .execute() + .await + .expect("Silo user should be able to lookup group by IP") + .parsed_body() + .unwrap(); + + assert_eq!(ip_lookup_group.identity.id, group.identity.id); + assert_eq!(ip_lookup_group.multicast_ip, multicast_ip); } /// Test that instances from different projects can attach to the same @@ -412,7 +383,7 @@ async fn test_cross_project_instance_attachment_allowed( let client = &cptestctx.external_client; // Create pools and projects - let (_, _project1, _project2, mcast_pool) = ops::join4( + let (_, _project1, _project2, _) = ops::join4( create_default_ip_pool(&client), create_project(client, "project1"), create_project(client, "project2"), @@ -420,51 +391,31 @@ async fn test_cross_project_instance_attachment_allowed( ) .await; - // Fleet admin creates a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 100)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "cross-project-group".parse().unwrap(), - description: "Fleet-scoped group for cross-project test" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - // Create instances in both projects let instance1 = create_instance(client, "project1", "instance1").await; let instance2 = create_instance(client, "project2", "instance2").await; - // 
Attach instance from project1 to the group + // First member-add implicitly creates the group let member_params1 = MulticastGroupMemberAdd { instance: NameOrId::Id(instance1.identity.id), + source_ips: None, }; let member_add_url1 = mcast_group_member_add_url( - &group.identity.name.to_string(), + "cross-project-group", &member_params1.instance, "project1", ); let member1: MulticastGroupMember = object_create(client, &member_add_url1, &member_params1).await; + // Fetch the implicitly created group + let group: MulticastGroup = + object_get(client, &mcast_group_url("cross-project-group")).await; + // Attach instance from project2 to the SAME group - should succeed let member_params2 = MulticastGroupMemberAdd { instance: NameOrId::Id(instance2.identity.id), + source_ips: None, }; let member_add_url2 = mcast_group_member_add_url( &group.identity.name.to_string(), @@ -496,34 +447,34 @@ async fn test_unauthenticated_cannot_list_multicast_groups( // Create multicast pool and link to silo create_multicast_ip_pool(&client, "mcast-pool").await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; link_ip_pool(&client, "mcast-pool", &silo.identity.id, false).await; - // Fleet admin creates a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 150)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "test-group".parse().unwrap(), - description: "Group for auth test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, - }; + // Create a collaborator user who can create groups + let creator = create_local_user( + client, + &silo, + &"creator-user".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; - NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - 
.expect_status(Some(StatusCode::CREATED)), + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + creator.id, + AuthnMode::PrivilegedUser, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap(); + .await; + + // Creator creates a multicast group via member-add + create_group_via_member_add(client, creator.id, "test-group", "mcast-pool") + .await; // Try to list multicast groups without authentication - should get 401 Unauthorized - RequestBuilder::new(client, http::Method::GET, &group_url) + let group_url = "/v1/multicast-groups"; + RequestBuilder::new(client, http::Method::GET, group_url) .expect_status(Some(StatusCode::UNAUTHORIZED)) .execute() .await @@ -548,36 +499,38 @@ async fn test_unauthenticated_cannot_access_member_operations( link_ip_pool(&client, "default", &silo.identity.id, true).await; link_ip_pool(&client, "mcast-pool", &silo.identity.id, false).await; - // Create project and instance + // Create a collaborator user who can create groups + let creator = create_local_user( + client, + &silo, + &"creator-user".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + creator.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Creator creates a multicast group via member-add + let group = create_group_via_member_add( + client, + creator.id, + "auth-test-group", + "mcast-pool", + ) + .await; + + // Create a second project and instance for testing unauthenticated add let project = create_project(client, "test-project").await; let instance = create_instance(client, "test-project", "test-instance").await; - // Fleet admin creates multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 150)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "auth-test-group".parse().unwrap(), - description: "Group for auth test".to_string(), - }, - multicast_ip: 
Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - // Try to LIST members without authentication - should get 401 let members_url = mcast_group_members_url(&group.identity.name.to_string()); RequestBuilder::new(client, http::Method::GET, &members_url) @@ -589,6 +542,7 @@ async fn test_unauthenticated_cannot_access_member_operations( // Try to ADD member without authentication - should get 401 let member_params = MulticastGroupMemberAdd { instance: NameOrId::Id(instance.identity.id), + source_ips: None, }; let member_add_url = mcast_group_member_add_url( &group.identity.name.to_string(), @@ -622,7 +576,7 @@ async fn test_unauthenticated_cannot_access_member_operations( /// group members even though they don't have access to the member instances. /// /// This validates that listing members only requires Read permission on the -/// multicast group (fleet-scoped), NOT permissions on individual instances. +/// multicast group (fleet-scoped), not permissions on individual instances. 
#[nexus_test] async fn test_unprivileged_users_can_list_group_members( cptestctx: &ControlPlaneTestContext, @@ -666,108 +620,20 @@ async fn test_unprivileged_users_can_list_group_members( ) .await; - // Privileged user creates their own project - let project_url = "/v1/projects"; - let project_params = ProjectCreate { - identity: IdentityMetadataCreateParams { - name: "privileged-project".parse().unwrap(), - description: "Project owned by privileged user".to_string(), - }, - }; - NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, project_url) - .body(Some(&project_params)) - .expect_status(Some(StatusCode::CREATED)), + // Privileged user creates group via member-add (implicitly creates group with first instance) + let group = create_group_via_member_add( + client, + privileged_user.id, + "asymmetric-test-group", + "mcast-pool", ) - .authn_as(AuthnMode::SiloUser(privileged_user.id)) - .execute() - .await - .unwrap(); + .await; - // Fleet admin creates multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 200)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "asymmetric-test-group".parse().unwrap(), - description: "Group for testing asymmetric authorization" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + // The helper created an instance and added it as a member + let members_url = mcast_group_members_url(&group.identity.name.to_string()); - // Privileged user creates instance in their project - let instance_url = 
"/v1/instances?project=privileged-project"; - let instance_params = InstanceCreate { - identity: IdentityMetadataCreateParams { - name: "privileged-instance".parse().unwrap(), - description: "Instance in privileged user's project".to_string(), - }, - ncpus: InstanceCpuCount::try_from(1).unwrap(), - memory: ByteCount::from_gibibytes_u32(1), - hostname: "privileged-instance".parse::().unwrap(), - user_data: vec![], - ssh_public_keys: None, - network_interfaces: InstanceNetworkInterfaceAttachment::Default, - external_ips: vec![], - multicast_groups: vec![], - disks: vec![], - boot_disk: None, - cpu_platform: None, - start: false, - auto_restart_policy: Default::default(), - anti_affinity_groups: Vec::new(), - }; - - let instance: Instance = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &instance_url) - .body(Some(&instance_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::SiloUser(privileged_user.id)) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - - // Privileged user adds their instance to the group - let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Id(instance.identity.id), - }; - let member_add_url = mcast_group_member_add_url( - &group.identity.name.to_string(), - &member_params.instance, - "privileged-project", - ); - - NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &member_add_url) - .body(Some(&member_params)) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::SiloUser(privileged_user.id)) - .execute() - .await - .unwrap(); - - // Unprivileged user (who does NOT have access to - // privileged-project or privileged-instance) CAN list the group members - let members_url = mcast_group_members_url(&group.identity.name.to_string()); + // Unprivileged user (who does not have access to the privileged user's project + // or instances) CAN list the group members - this is the asymmetric authorization let members_response: 
dropshot::ResultsPage = NexusRequest::object_get(client, &members_url) .authn_as(AuthnMode::SiloUser(unprivileged_user.id)) @@ -787,10 +653,6 @@ async fn test_unprivileged_users_can_list_group_members( 1, "Should see 1 member in the group (even though unprivileged user doesn't own it)" ); - assert_eq!( - members[0].instance_id, instance.identity.id, - "Should see the privileged user's instance ID in member list" - ); assert_eq!( members[0].multicast_group_id, group.identity.id, "Member should be associated with the correct group" @@ -808,14 +670,27 @@ async fn test_unprivileged_users_can_list_group_members( let privileged_members = privileged_response.items; assert_eq!(privileged_members.len(), 1); - assert_eq!(privileged_members[0].instance_id, instance.identity.id); assert_eq!(privileged_members[0].multicast_group_id, group.identity.id); - // Unprivileged user should get 404 (NOT 403) when trying to add/remove + // Unprivileged user should get 404 (not 403) when trying to add/remove // instances from inaccessible projects - // Try to ADD the instance (should get 404 because unprivileged user + // The helper created an instance with a predictable name + let instance_name = "asymmetric-test-group-instance"; + let project_name = "asymmetric-test-group-project"; + + // Try to ADD the existing instance again (should get 404 because unprivileged user // can't see the instance, not 403 which would leak its existence) + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + let member_add_url = mcast_group_member_add_url( + &group.identity.name.to_string(), + &member_params.instance, + project_name, + ); + NexusRequest::new( RequestBuilder::new(client, http::Method::POST, &member_add_url) .body(Some(&member_params)) @@ -830,9 +705,10 @@ async fn test_unprivileged_users_can_list_group_members( // Try to REMOVE the instance (should get 404, not 403) let member_delete_url = format!( - 
"{}/{}?project=privileged-project", + "{}/{}?project={}", mcast_group_members_url(&group.identity.name.to_string()), - instance.identity.name + instance_name, + project_name ); NexusRequest::new( @@ -859,12 +735,53 @@ async fn test_unprivileged_users_can_list_group_members( 1, "Member should still exist after failed unauthorized operations" ); + + // Parity: instance-centric endpoint should enforce the same behavior (404) + // when operating on someone else's instance + + // Use the instance ID from the member list + let instance_id = privileged_members[0].instance_id; + + // Attempt JOIN via instance-centric path (by ID) as unprivileged user + let group_id = group.identity.id; + let inst_join_url_id = + format!("/v1/instances/{instance_id}/multicast-groups/{group_id}"); + let inst_join_body = serde_json::json!({}); + NexusRequest::new( + RequestBuilder::new(client, http::Method::PUT, &inst_join_url_id) + .body(Some(&inst_join_body)) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::SiloUser(unprivileged_user.id)) + .execute() + .await + .expect( + "Instance-centric join should return 404 for inaccessible instance", + ); + + // Attempt LEAVE via instance-centric path (by ID) as unprivileged user + let inst_leave_url_id = inst_join_url_id.clone(); + NexusRequest::new( + RequestBuilder::new(client, http::Method::DELETE, &inst_leave_url_id) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::SiloUser(unprivileged_user.id)) + .execute() + .await + .expect( + "Instance-centric leave should return 404 for inaccessible instance", + ); } /// Test that authenticated silo users with ONLY project-level roles (no -/// silo-level roles) can still access multicast groups fleet-wide. This verifies -/// that being an authenticated SiloUser is sufficient - multicast group access -/// does not depend on having any specific silo-level or project-level roles. +/// silo-level roles) can still access multicast groups. 
This verifies that +/// being an authenticated SiloUser is sufficient - multicast group access does +/// not depend on having any specific silo-level or project-level roles. +/// +/// This verifies that project-only users can: +/// - List and read multicast groups (fleet-scoped discovery) +/// - Implicitly create groups via member-add API (group owned by their silo) +/// - Create instances and attach them to groups #[nexus_test] async fn test_project_only_users_can_access_multicast_groups( cptestctx: &ControlPlaneTestContext, @@ -873,7 +790,7 @@ async fn test_project_only_users_can_access_multicast_groups( // create_default_ip_pool already links "default" pool to the DEFAULT_SILO create_default_ip_pool(&client).await; - // Create multicast pool (fleet-scoped, no per-silo linking needed) + // Create multicast pool (already linked to DEFAULT_SILO by helper) create_multicast_ip_pool(&client, "mcast-pool").await; // Get the DEFAULT silo (same silo as the privileged test user) @@ -909,31 +826,32 @@ async fn test_project_only_users_can_access_multicast_groups( ) .await; - // Fleet admin creates a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 250)); - let group_url = "/v1/multicast-groups"; - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "project-user-test".parse().unwrap(), - description: "Group for testing project-only user access" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, - }; - let group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&group_params)) - .expect_status(Some(StatusCode::CREATED)), + // Create a silo collaborator who can create the first group + let creator = create_local_user( + client, + &silo, + &"creator-user".parse().unwrap(), + UserPassword::LoginDisallowed, ) - .authn_as(AuthnMode::PrivilegedUser) - 
.execute() - .await - .unwrap() - .parsed_body() - .unwrap(); + .await; + + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + creator.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Creator creates a multicast group via member-add + let group = create_group_via_member_add( + client, + creator.id, + "project-user-test", + "mcast-pool", + ) + .await; // Project-only user CAN LIST multicast groups (no silo roles needed) let list_response: dropshot::ResultsPage = @@ -967,80 +885,95 @@ async fn test_project_only_users_can_access_multicast_groups( assert_eq!(read_group.identity.id, group.identity.id); - // Project-only user CAN CREATE a multicast group - let user_group_params = MulticastGroupCreate { + // Project-only user CAN CREATE a multicast group via member-add + // They create an instance in their project, then add it as a member + let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { - name: "created-by-project-user".parse().unwrap(), - description: "Group created by project-only user".to_string(), + name: "project-user-instance".parse().unwrap(), + description: "Instance for testing project-only user".to_string(), }, - multicast_ip: Some(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 251))), - source_ips: None, - pool: Some(NameOrId::Name("mcast-pool".parse().unwrap())), - mvlan: None, + ncpus: InstanceCpuCount::try_from(1).unwrap(), + memory: ByteCount::from_gibibytes_u32(1), + hostname: "project-user-instance".parse::().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + multicast_groups: vec![], + disks: vec![], + boot_disk: None, + cpu_platform: None, + start: false, + auto_restart_policy: Default::default(), + anti_affinity_groups: Vec::new(), }; - let user_created_group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&user_group_params)) - 
.expect_status(Some(StatusCode::CREATED)), + let instance: Instance = NexusRequest::new( + RequestBuilder::new( + client, + http::Method::POST, + "/v1/instances?project=project-only", + ) + .body(Some(&instance_params)) + .expect_status(Some(StatusCode::CREATED)), ) .authn_as(AuthnMode::SiloUser(project_user.id)) .execute() .await - .expect("Project-only user should be able to create multicast group") + .expect( + "Project-only user should be able to create instance in their project", + ) .parsed_body() .unwrap(); - assert_eq!( - user_created_group.identity.name.as_str(), - "created-by-project-user" - ); - - // Wait for group to become active before modifying - wait_for_group_active(client, "created-by-project-user").await; - - // Project-only user CAN MODIFY multicast groups (including ones they created) - let update_url = - mcast_group_url(&user_created_group.identity.name.to_string()); - let update_params = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some(user_created_group.identity.name.clone()), - description: Some("Updated by project-only user".to_string()), - }, + // Add instance as member to implicitly create the group + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance.identity.id), source_ips: None, - mvlan: None, }; + let member_add_url = mcast_group_member_add_url( + "created-by-project-user", + &member_params.instance, + "project-only", + ); - let updated_group: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, http::Method::PUT, &update_url) - .body(Some(&update_params)) - .expect_status(Some(StatusCode::OK)), + NexusRequest::new( + RequestBuilder::new( + client, + http::Method::POST, + &member_add_url, + ) + .body(Some(&member_params)) + .expect_status(Some(StatusCode::CREATED)), ) .authn_as(AuthnMode::SiloUser(project_user.id)) .execute() .await - .expect("Project-only user should be able to modify multicast group") - .parsed_body() - .unwrap(); + .expect("Project-only user 
should be able to add member (implicitly creates group)"); + + // Fetch the implicitly created group + let user_created_group: MulticastGroup = + object_get(client, &mcast_group_url("created-by-project-user")).await; assert_eq!( - updated_group.identity.description, - "Updated by project-only user" + user_created_group.identity.name.as_str(), + "created-by-project-user" ); - // Project-only user CAN CREATE an instance in the project (Project::Collaborator) + // Project-only user CAN CREATE a second instance in the project (Project::Collaborator) // Must use project ID (not name) since user has no silo-level roles - let instance_name = "project-user-instance"; + let instance_name2 = "project-user-instance-2"; let instances_url = format!("/v1/instances?project={}", project.identity.id); let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { - name: instance_name.parse().unwrap(), - description: "Instance created by project-only user".to_string(), + name: instance_name2.parse().unwrap(), + description: "Second instance created by project-only user" + .to_string(), }, ncpus: InstanceCpuCount::try_from(1).unwrap(), memory: ByteCount::from_gibibytes_u32(1), - hostname: instance_name.parse().unwrap(), + hostname: instance_name2.parse().unwrap(), user_data: vec![], ssh_public_keys: None, network_interfaces: InstanceNetworkInterfaceAttachment::Default, @@ -1053,7 +986,7 @@ async fn test_project_only_users_can_access_multicast_groups( anti_affinity_groups: Vec::new(), multicast_groups: Vec::new(), }; - let instance: Instance = NexusRequest::objects_post( + let instance2: Instance = NexusRequest::objects_post( client, &instances_url, &instance_params, @@ -1068,14 +1001,15 @@ async fn test_project_only_users_can_access_multicast_groups( .expect("Should parse created instance"); // Project-only user CAN ATTACH the instance they own to a fleet-scoped group - let member_add_url = format!( - "{}?project={}", - 
mcast_group_members_url(&group.identity.name.to_string()), - project.identity.name - ); let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Name(instance_name.parse().unwrap()), + instance: NameOrId::Name(instance_name2.parse().unwrap()), + source_ips: None, }; + let member_add_url = mcast_group_member_add_url( + &group.identity.name.to_string(), + &member_params.instance, + &project.identity.name.to_string(), + ); let member: MulticastGroupMember = NexusRequest::new( RequestBuilder::new(client, http::Method::POST, &member_add_url) .body(Some(&member_params)) @@ -1089,6 +1023,1049 @@ async fn test_project_only_users_can_access_multicast_groups( .unwrap(); // Verify the member was created successfully - assert_eq!(member.instance_id, instance.identity.id); + assert_eq!(member.instance_id, instance2.identity.id); assert_eq!(member.multicast_group_id, group.identity.id); } + +/// Test that users from different silos can both read multicast groups +/// (fleet-scoped visibility). This validates the core cross-silo multicast use case: +/// multicast groups are discoverable across silo boundaries. +/// +/// This test verifies: +/// - Users in different silos can both discover and read the same multicast groups +/// - Groups created by Silo A are visible to Silo B users (and vice versa) +#[nexus_test] +async fn test_silo_admins_cannot_modify_other_silos_groups( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + create_default_ip_pool(&client).await; + + // Create multicast IP pool (fleet-scoped) + create_multicast_ip_pool(&client, "mcast-pool").await; + + // Create Silo A (not using default test silo - it has Admin->FleetAdmin mapping) + // We explicitly create both silos with no fleet role mappings to test the + // authorization boundary correctly. 
+ let silo_a_params = SiloCreate { + identity: IdentityMetadataCreateParams { + name: "silo-a".parse().unwrap(), + description: "First silo for cross-silo auth testing".to_string(), + }, + quotas: SiloQuotasCreate::empty(), + discoverable: false, + identity_mode: SiloIdentityMode::LocalOnly, + admin_group_name: None, + tls_certificates: vec![], + mapped_fleet_roles: Default::default(), + }; + + let silo_a: Silo = + NexusRequest::objects_post(client, "/v1/system/silos", &silo_a_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + let silo_a_url = format!("/v1/system/silos/{}", silo_a.identity.name); + link_ip_pool(&client, "default", &silo_a.identity.id, true).await; + link_ip_pool(&client, "mcast-pool", &silo_a.identity.id, false).await; + + // Create Silo B + let silo_b_params = SiloCreate { + identity: IdentityMetadataCreateParams { + name: "silo-b".parse().unwrap(), + description: "Second silo for cross-silo auth testing".to_string(), + }, + quotas: SiloQuotasCreate::empty(), + discoverable: false, + identity_mode: SiloIdentityMode::LocalOnly, + admin_group_name: None, + tls_certificates: vec![], + mapped_fleet_roles: Default::default(), + }; + + let silo_b: Silo = + NexusRequest::objects_post(client, "/v1/system/silos", &silo_b_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + let silo_b_url = format!("/v1/system/silos/{}", silo_b.identity.name); + link_ip_pool(&client, "default", &silo_b.identity.id, true).await; + link_ip_pool(&client, "mcast-pool", &silo_b.identity.id, false).await; + + // Create silo admin for Silo A + let admin_a = create_local_user( + client, + &silo_a, + &"admin-a".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + grant_iam( + client, + &silo_a_url, + SiloRole::Admin, + admin_a.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Create silo admin for Silo B + let admin_b = create_local_user( 
+ client, + &silo_b, + &"admin-b".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + grant_iam( + client, + &silo_b_url, + SiloRole::Admin, + admin_b.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Admin A creates a multicast group via member-add (owned by Silo A) + let group_a = create_group_via_member_add( + client, + admin_a.id, + "group-owned-by-silo-a", + "mcast-pool", + ) + .await; + + // Admin B creates a multicast group via member-add (owned by Silo B) + let group_b = create_group_via_member_add( + client, + admin_b.id, + "group-owned-by-silo-b", + "mcast-pool", + ) + .await; + + // Both silo admins CAN READ each other's groups (fleet-scoped visibility) + let read_b_by_a: MulticastGroup = NexusRequest::new( + RequestBuilder::new( + client, + http::Method::GET, + &mcast_group_url(&group_b.identity.name.to_string()), + ) + .expect_status(Some(StatusCode::OK)), + ) + .authn_as(AuthnMode::SiloUser(admin_a.id)) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(read_b_by_a.identity.id, group_b.identity.id); + + let read_a_by_b: MulticastGroup = NexusRequest::new( + RequestBuilder::new( + client, + http::Method::GET, + &mcast_group_url(&group_a.identity.name.to_string()), + ) + .expect_status(Some(StatusCode::OK)), + ) + .authn_as(AuthnMode::SiloUser(admin_b.id)) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + assert_eq!(read_a_by_b.identity.id, group_a.identity.id); +} + +/// Test that instances from different silos can attach to the same multicast +/// group when both silos have the multicast pool linked. +/// +/// Cross-silo multicast works by linking the same pool to multiple silos. +/// Pool linking is the mechanism of access control: a silo can only use +/// pools that are linked to it. 
+/// +/// This test verifies: +/// - Users in different silos (both linked to the pool) can join the same group +/// - Instances from Silo A can attach to a group +/// - Instances from Silo B can attach to the SAME group +/// - Both members can be listed together in the group membership +#[nexus_test] +async fn test_cross_silo_instance_attachment( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + create_default_ip_pool(&client).await; + + // Create multicast IP pool (fleet-scoped) + create_multicast_ip_pool(&client, "mcast-pool").await; + + // Get Silo A (default test silo) + let silo_a_url = format!("/v1/system/silos/{}", cptestctx.silo_name); + let silo_a: Silo = object_get(client, &silo_a_url).await; + link_ip_pool(&client, "default", &silo_a.identity.id, true).await; + link_ip_pool(&client, "mcast-pool", &silo_a.identity.id, false).await; + + // Create Silo B + let silo_b_params = SiloCreate { + identity: IdentityMetadataCreateParams { + name: "silo-b-cross".parse().unwrap(), + description: "Second silo for cross-silo instance attachment" + .to_string(), + }, + quotas: SiloQuotasCreate::empty(), + discoverable: false, + identity_mode: SiloIdentityMode::LocalOnly, + admin_group_name: None, + tls_certificates: vec![], + mapped_fleet_roles: Default::default(), + }; + + let silo_b: Silo = + NexusRequest::objects_post(client, "/v1/system/silos", &silo_b_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + let silo_b_url = format!("/v1/system/silos/{}", silo_b.identity.name); + link_ip_pool(&client, "default", &silo_b.identity.id, true).await; + // Link mcast-pool to Silo B as well - cross-silo multicast works by + // linking the same pool to multiple silos (pool linking = access control) + link_ip_pool(&client, "mcast-pool", &silo_b.identity.id, false).await; + + // Create user in Silo A + let user_a = create_local_user( + client, + &silo_a, + 
&"user-a".parse().unwrap(),
+ UserPassword::LoginDisallowed,
+ )
+ .await;
+
+ grant_iam(
+ client,
+ &silo_a_url,
+ SiloRole::Collaborator,
+ user_a.id,
+ AuthnMode::PrivilegedUser,
+ )
+ .await;
+
+ // Create user in Silo B
+ let user_b = create_local_user(
+ client,
+ &silo_b,
+ &"user-b".parse().unwrap(),
+ UserPassword::LoginDisallowed,
+ )
+ .await;
+
+ grant_iam(
+ client,
+ &silo_b_url,
+ SiloRole::Collaborator,
+ user_b.id,
+ AuthnMode::PrivilegedUser,
+ )
+ .await;
+
+ // User A creates a project in Silo A
+ let project_a_params = ProjectCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "project-silo-a".parse().unwrap(),
+ description: "Project in Silo A".to_string(),
+ },
+ };
+
+ NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, "/v1/projects")
+ .body(Some(&project_a_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .unwrap();
+
+ // User B creates a project in Silo B
+ let project_b_params = ProjectCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "project-silo-b".parse().unwrap(),
+ description: "Project in Silo B".to_string(),
+ },
+ };
+
+ NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, "/v1/projects")
+ .body(Some(&project_b_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .unwrap();
+
+ // User A creates instance in Silo A's project
+ let instance_a_params = InstanceCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "instance-silo-a".parse().unwrap(),
+ description: "Instance in Silo A".to_string(),
+ },
+ ncpus: InstanceCpuCount::try_from(1).unwrap(),
+ memory: ByteCount::from_gibibytes_u32(1),
+ hostname: "instance-silo-a".parse::<Hostname>().unwrap(),
+ user_data: vec![],
+ ssh_public_keys: None,
+ network_interfaces: InstanceNetworkInterfaceAttachment::Default,
+ external_ips: vec![],
+ multicast_groups: vec![],
+ disks: vec![],
+ 
boot_disk: None,
+ cpu_platform: None,
+ start: false,
+ auto_restart_policy: Default::default(),
+ anti_affinity_groups: Vec::new(),
+ };
+
+ let instance_a: Instance = NexusRequest::new(
+ RequestBuilder::new(
+ client,
+ http::Method::POST,
+ "/v1/instances?project=project-silo-a",
+ )
+ .body(Some(&instance_a_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ // User B creates instance in Silo B's project
+ let instance_b_params = InstanceCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "instance-silo-b".parse().unwrap(),
+ description: "Instance in Silo B".to_string(),
+ },
+ ncpus: InstanceCpuCount::try_from(1).unwrap(),
+ memory: ByteCount::from_gibibytes_u32(1),
+ hostname: "instance-silo-b".parse::<Hostname>().unwrap(),
+ user_data: vec![],
+ ssh_public_keys: None,
+ network_interfaces: InstanceNetworkInterfaceAttachment::Default,
+ external_ips: vec![],
+ multicast_groups: vec![],
+ disks: vec![],
+ boot_disk: None,
+ cpu_platform: None,
+ start: false,
+ auto_restart_policy: Default::default(),
+ anti_affinity_groups: Vec::new(),
+ };
+
+ let instance_b: Instance = NexusRequest::new(
+ RequestBuilder::new(
+ client,
+ http::Method::POST,
+ "/v1/instances?project=project-silo-b",
+ )
+ .body(Some(&instance_b_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ // User A attaches their instance (from Silo A) to a new group (implicitly creates it)
+ let group_name = "cross-silo-group";
+ let member_a_params = MulticastGroupMemberAdd {
+ instance: NameOrId::Id(instance_a.identity.id),
+ source_ips: None,
+ };
+ let member_add_a_url = mcast_group_member_add_url(
+ group_name,
+ &member_a_params.instance,
+ "project-silo-a",
+ );
+
+ let member_a: MulticastGroupMember = NexusRequest::new(
+ RequestBuilder::new(client, 
http::Method::POST, &member_add_a_url)
+ .body(Some(&member_a_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ // Fetch the implicitly created group
+ let group: MulticastGroup =
+ object_get(client, &mcast_group_url(group_name)).await;
+
+ assert_eq!(member_a.instance_id, instance_a.identity.id);
+ assert_eq!(member_a.multicast_group_id, group.identity.id);
+
+ // User B attaches their instance (from Silo B) to the SAME fleet-scoped group
+ // This is the key test: cross-silo instance attachment should succeed
+ let member_b_params = MulticastGroupMemberAdd {
+ instance: NameOrId::Id(instance_b.identity.id),
+ source_ips: None,
+ };
+ let member_add_b_url = mcast_group_member_add_url(
+ &group.identity.name.to_string(),
+ &member_b_params.instance,
+ "project-silo-b",
+ );
+
+ let member_b: MulticastGroupMember = NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, &member_add_b_url)
+ .body(Some(&member_b_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(member_b.instance_id, instance_b.identity.id);
+ assert_eq!(member_b.multicast_group_id, group.identity.id);
+
+ // Both instances should be visible in the group's member list
+ let members_url = mcast_group_members_url(&group.identity.name.to_string());
+ let members_response: dropshot::ResultsPage<MulticastGroupMember> =
+ NexusRequest::object_get(client, &members_url)
+ .authn_as(AuthnMode::PrivilegedUser)
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ let members = members_response.items;
+
+ assert_eq!(members.len(), 2, "Should have 2 members (one from each silo)");
+
+ // Verify both instances are in the member list
+ let instance_ids: Vec<_> = members.iter().map(|m| m.instance_id).collect();
+ assert!(
+ 
instance_ids.contains(&instance_a.identity.id),
+ "Instance from Silo A should be in member list"
+ );
+ assert!(
+ instance_ids.contains(&instance_b.identity.id),
+ "Instance from Silo B should be in member list"
+ );
+
+ // Both users should be able to see the complete member list (both silos linked)
+ let members_by_a: dropshot::ResultsPage<MulticastGroupMember> =
+ NexusRequest::object_get(client, &members_url)
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(members_by_a.items.len(), 2, "User A should see both members");
+
+ let members_by_b: dropshot::ResultsPage<MulticastGroupMember> =
+ NexusRequest::object_get(client, &members_url)
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(members_by_b.items.len(), 2, "User B should see both members");
+
+ // Case: Cross-silo IP lookup
+ // User B (from Silo B, which has mcast-pool linked) can lookup the group
+ // by its IP address. Cross-silo access works because both silos are
+ // linked to the same pool.
+ let multicast_ip = group.multicast_ip;
+ let ip_lookup_url = format!("/v1/multicast-groups/{multicast_ip}");
+ let ip_lookup_result: MulticastGroup = NexusRequest::new(
+ RequestBuilder::new(client, http::Method::GET, &ip_lookup_url)
+ .expect_status(Some(StatusCode::OK)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .expect("User B should be able to lookup group by IP (pool linked to silo)")
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(
+ ip_lookup_result.identity.id, group.identity.id,
+ "IP lookup should return the correct group"
+ );
+ assert_eq!(
+ ip_lookup_result.multicast_ip, multicast_ip,
+ "IP lookup result should have matching multicast_ip"
+ );
+
+ // Case: Cross-silo new group creation
+ // User B (from Silo B, which has mcast-pool linked) can create a new
+ // multicast group. Pool linking is the mechanism of access control.
+ let new_group_name = "user-b-created-group"; + let member_add_new_group_url = format!( + "{}/members?project=project-silo-b", + mcast_group_url(new_group_name) + ); + let new_group_member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance_b.identity.id), + source_ips: None, + }; + + // This should succeed because mcast-pool is linked to Silo B + let new_group_member: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new( + client, + http::Method::POST, + &member_add_new_group_url, + ) + .body(Some(&new_group_member_params)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect("User B should create new group (pool linked to silo)") + .parsed_body() + .unwrap(); + + assert_eq!( + new_group_member.instance_id, instance_b.identity.id, + "New group member should reference User B's instance" + ); + + // Verify the new group was created and is accessible + let new_group: MulticastGroup = + object_get(client, &mcast_group_url(new_group_name)).await; + assert_eq!( + new_group.identity.name.as_str(), + new_group_name, + "New group should have correct name" + ); + + // Remove member from new group (triggers implicit deletion) + let new_group_member_delete_url = format!( + "{}/members/{}", + mcast_group_url(new_group_name), + instance_b.identity.id + ); + NexusRequest::new( + RequestBuilder::new( + client, + http::Method::DELETE, + &new_group_member_delete_url, + ) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect("Should clean up new group member"); + + // Re-add User B's instance to the original group for subsequent tests + let rejoin_member: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, &member_add_b_url) + .body(Some(&member_b_params)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + 
.expect("User B should rejoin original group")
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(rejoin_member.instance_id, instance_b.identity.id);
+
+ // Case: Cross-silo detach
+
+ // User A CANNOT detach User B's instance (404 - can't see Silo B's instance)
+ // Using instance ID since we're crossing silo boundaries
+ let member_delete_b_by_a_url = format!(
+ "{}/{}",
+ mcast_group_members_url(&group.identity.name.to_string()),
+ instance_b.identity.id,
+ );
+
+ NexusRequest::new(
+ RequestBuilder::new(
+ client,
+ http::Method::DELETE,
+ &member_delete_b_by_a_url,
+ )
+ .expect_status(Some(StatusCode::NOT_FOUND)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .expect("User A should get 404 when trying to detach Silo B's instance");
+
+ // User B CAN detach their own instance from the group (even though owned by different silo)
+ let member_delete_b_url = format!(
+ "{}/{}",
+ mcast_group_members_url(&group.identity.name.to_string()),
+ instance_b.identity.id,
+ );
+
+ NexusRequest::new(
+ RequestBuilder::new(client, http::Method::DELETE, &member_delete_b_url)
+ .expect_status(Some(StatusCode::NO_CONTENT)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .expect("User B should be able to detach their own instance");
+
+ // Verify only User A's instance remains
+ let members_after_detach: dropshot::ResultsPage<MulticastGroupMember> =
+ NexusRequest::object_get(client, &members_url)
+ .authn_as(AuthnMode::PrivilegedUser)
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ assert_eq!(
+ members_after_detach.items.len(),
+ 1,
+ "Should have 1 member after Silo B's instance detached"
+ );
+ assert_eq!(
+ members_after_detach.items[0].instance_id, instance_a.identity.id,
+ "Remaining member should be Silo A's instance"
+ );
+}
+
+/// Test that both member-add endpoints have identical permission behavior
+/// when project-level IAM grants are used.
+/// +/// This verifies that: +/// - `/v1/multicast-groups/{group}/members` (group-centric) +/// - `/v1/instances/{instance}/multicast-groups/{group}` (instance-centric) +/// +/// Both enforce the same Instance::Modify permission check and return 404 (not 403) +/// when a user without permission tries to add an instance, and both succeed when +/// the user is granted project-level access. +#[nexus_test] +async fn test_both_member_endpoints_have_same_permissions( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + // create_default_ip_pool already links "default" pool to the DEFAULT_SILO + create_default_ip_pool(&client).await; + + // Create multicast pool (already linked to DEFAULT_SILO by helper) + create_multicast_ip_pool(&client, "mcast-pool").await; + + // Get the DEFAULT silo (same silo as PrivilegedUser) + // This ensures that when we create a project using AuthnMode::PrivilegedUser, + // it will be created in the same silo as our users + use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; + let silo_url = format!("/v1/system/silos/{}", DEFAULT_SILO.identity().name); + let silo: Silo = object_get(client, &silo_url).await; + + // Create User A (project owner) in the default silo + let user_a = create_local_user( + client, + &silo, + &"user-a".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + user_a.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Create User B (unprivileged) in the same silo + // User B intentionally has NO silo-level roles - they're just a regular user + let user_b = create_local_user( + client, + &silo, + &"user-b".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + // Create the project as PrivilegedUser so we can grant IAM on it + let project_a = create_project(client, "parity-test-project").await; + + // Grant User A access to the project + let project_a_url = format!("/v1/projects/{}", 
project_a.identity.name);
+ grant_iam(
+ client,
+ &project_a_url,
+ ProjectRole::Collaborator,
+ user_a.id,
+ AuthnMode::PrivilegedUser,
+ )
+ .await;
+
+ // User A creates an instance in the project
+ let instance_a_name = "parity-test-instance";
+ let instance_params = InstanceCreate {
+ identity: IdentityMetadataCreateParams {
+ name: instance_a_name.parse().unwrap(),
+ description: "Instance for parity test".to_string(),
+ },
+ ncpus: InstanceCpuCount::try_from(2).unwrap(),
+ memory: ByteCount::from_gibibytes_u32(1),
+ hostname: instance_a_name.parse().unwrap(),
+ user_data: vec![],
+ ssh_public_keys: None,
+ network_interfaces: InstanceNetworkInterfaceAttachment::Default,
+ external_ips: vec![],
+ multicast_groups: vec![],
+ disks: vec![],
+ boot_disk: None,
+ cpu_platform: None,
+ start: false,
+ auto_restart_policy: Default::default(),
+ anti_affinity_groups: Vec::new(),
+ };
+ let instance_url =
+ format!("/v1/instances?project={}", project_a.identity.name);
+ let instance_a: Instance = NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, &instance_url)
+ .body(Some(&instance_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ .unwrap()
+ .parsed_body()
+ .unwrap();
+
+ // User A adds the instance as a member (implicitly creates the group)
+ let group_name = "parity-test-group";
+ let member_params_create = MulticastGroupMemberAdd {
+ instance: NameOrId::Id(instance_a.identity.id),
+ source_ips: None,
+ };
+ // The URL helper omits the ?project= query parameter when the instance is
+ // referenced by ID (sending ?project= with an ID would cause 400 Bad Request);
+ // the project name passed here is only used for name-based references.
+ let member_add_url = mcast_group_member_add_url(
+ group_name,
+ &member_params_create.instance,
+ project_a.identity.name.as_str(),
+ );
+ NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, &member_add_url)
+ .body(Some(&member_params_create))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_a.id))
+ .execute()
+ .await
+ 
.unwrap(); + + // Fetch the implicitly created group + let group_a: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + + // Case: Permission enforcement without project access + + // Build URLs for both endpoints + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance_a.identity.id), + source_ips: None, + }; + let group_centric_url = mcast_group_member_add_url( + &group_a.identity.name.to_string(), + &member_params.instance, + project_a.identity.name.as_str(), + ); + + let instance_centric_url = format!( + "/v1/instances/{}/multicast-groups/{}", + instance_a.identity.id, group_a.identity.id + ); + let inst_body = serde_json::json!({}); + + // User B should get 404 via the group-centric endpoint (no access to instance) + NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, &group_centric_url) + .body(Some(&member_params)) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect( + "User B should get 404 via group-centric endpoint without permission", + ); + + // User B should ALSO get 404 via the instance-centric endpoint (same permission check) + NexusRequest::new( + RequestBuilder::new(client, http::Method::PUT, &instance_centric_url) + .body(Some(&inst_body)) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect("User B should get 404 via instance-centric endpoint without permission"); + + // Case: Permission enforcement with project-level access + + // Grant User B project-level Collaborator access to User A's project + // Use PrivilegedUser to grant IAM (requires Project::ModifyPolicy permission) + let project_a_url = format!("/v1/projects/{}", project_a.identity.name); + grant_iam( + client, + &project_a_url, + ProjectRole::Collaborator, + user_b.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // Create a second instance in the project (User A still 
owns it, but User B now has access) + let instance_b = create_instance( + client, + project_a.identity.name.as_str(), + "parity-test-instance-2", + ) + .await; + + // User B should now succeed via the group-centric endpoint (has Instance::Modify permission) + let member_b_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance_b.identity.id), + source_ips: None, + }; + let group_centric_url_b = mcast_group_member_add_url( + &group_a.identity.name.to_string(), + &member_b_params.instance, + project_a.identity.name.as_str(), + ); + let member: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new(client, http::Method::POST, &group_centric_url_b) + .body(Some(&member_b_params)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect("User B should succeed via group-centric endpoint with permission") + .parsed_body() + .unwrap(); + + assert_eq!(member.instance_id, instance_b.identity.id); + assert_eq!(member.multicast_group_id, group_a.identity.id); + + // Create a third instance for testing the instance-centric endpoint + let instance_c = create_instance( + client, + project_a.identity.name.as_str(), + "parity-test-instance-3", + ) + .await; + + // User B should ALSO succeed via the instance-centric endpoint (same permission check) + let instance_centric_url_c = format!( + "/v1/instances/{}/multicast-groups/{}", + instance_c.identity.id, group_a.identity.id + ); + NexusRequest::new( + RequestBuilder::new(client, http::Method::PUT, &instance_centric_url_c) + .body(Some(&inst_body)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect( + "User B should succeed via instance-centric endpoint with permission", + ); + + // This verifies both endpoints have identical permission behavior: + // - Without permission: both return 404 + // - With project-level access granted: both succeed with 201 Created +} + +/// 
Test that a silo cannot use a multicast pool that is not linked to it. +/// +/// Pool linking is the access control mechanism for multicast. A silo can only +/// use multicast pools that are explicitly linked to it. This test verifies +/// that a user in Silo B cannot join a multicast group when the pool is only +/// linked to Silo A. +#[nexus_test] +async fn test_silo_cannot_use_unlinked_pool( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + create_default_ip_pool(&client).await; + + // Create multicast IP pool (fleet-scoped) + create_multicast_ip_pool(&client, "mcast-pool").await; + + // Get Silo A (default test silo) and link pools to it + let silo_a_url = format!("/v1/system/silos/{}", cptestctx.silo_name); + let silo_a: Silo = object_get(client, &silo_a_url).await; + link_ip_pool(&client, "default", &silo_a.identity.id, true).await; + link_ip_pool(&client, "mcast-pool", &silo_a.identity.id, false).await; + + // Create Silo B (but do not link mcast-pool to it) + let silo_b_params = SiloCreate { + identity: IdentityMetadataCreateParams { + name: "silo-b-unlinked".parse().unwrap(), + description: "Silo without multicast pool linked".to_string(), + }, + quotas: SiloQuotasCreate::empty(), + discoverable: false, + identity_mode: SiloIdentityMode::LocalOnly, + admin_group_name: None, + tls_certificates: vec![], + mapped_fleet_roles: Default::default(), + }; + + let silo_b: Silo = + NexusRequest::objects_post(client, "/v1/system/silos", &silo_b_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + let silo_b_url = format!("/v1/system/silos/{}", silo_b.identity.name); + + // Link only the default pool to Silo B (not the mcast-pool) + link_ip_pool(&client, "default", &silo_b.identity.id, true).await; + + // Create user in Silo B + let user_b = create_local_user( + client, + &silo_b, + &"user-b-unlinked".parse().unwrap(), + UserPassword::LoginDisallowed, + ) + .await; + + 
grant_iam(
+ client,
+ &silo_b_url,
+ SiloRole::Collaborator,
+ user_b.id,
+ AuthnMode::PrivilegedUser,
+ )
+ .await;
+
+ // User B creates a project and instance in Silo B
+ let project_params = ProjectCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "project-silo-b".parse().unwrap(),
+ description: "Project in Silo B".to_string(),
+ },
+ };
+
+ NexusRequest::new(
+ RequestBuilder::new(client, http::Method::POST, "/v1/projects")
+ .body(Some(&project_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .expect("User B should create project in Silo B");
+
+ let instance_params = InstanceCreate {
+ identity: IdentityMetadataCreateParams {
+ name: "instance-silo-b".parse().unwrap(),
+ description: "Instance in Silo B".to_string(),
+ },
+ ncpus: InstanceCpuCount::try_from(1).unwrap(),
+ memory: ByteCount::from_gibibytes_u32(1),
+ hostname: "instance-silo-b".parse::<Hostname>().unwrap(),
+ user_data: vec![],
+ ssh_public_keys: None,
+ network_interfaces: InstanceNetworkInterfaceAttachment::Default,
+ external_ips: vec![],
+ multicast_groups: vec![],
+ disks: vec![],
+ boot_disk: None,
+ cpu_platform: None,
+ start: false,
+ auto_restart_policy: Default::default(),
+ anti_affinity_groups: Vec::new(),
+ };
+
+ NexusRequest::new(
+ RequestBuilder::new(
+ client,
+ http::Method::POST,
+ "/v1/instances?project=project-silo-b",
+ )
+ .body(Some(&instance_params))
+ .expect_status(Some(StatusCode::CREATED)),
+ )
+ .authn_as(AuthnMode::SiloUser(user_b.id))
+ .execute()
+ .await
+ .expect("User B should create instance in Silo B");
+
+ // User B tries to join a multicast group - should fail because mcast-pool
+ // is not linked to Silo B
+ let member_add_url =
+ "/v1/multicast-groups/test-group/members?project=project-silo-b";
+ let member_params = MulticastGroupMemberAdd {
+ instance: NameOrId::Name("instance-silo-b".parse().unwrap()),
+ source_ips: None,
+ };
+
+ let error = NexusRequest::new(
+ 
RequestBuilder::new(client, http::Method::POST, member_add_url) + .body(Some(&member_params)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::SiloUser(user_b.id)) + .execute() + .await + .expect("User B should get 400 when no pool is linked to their silo"); + + // Verify the error indicates no pool was found + let error_body: dropshot::HttpErrorResponseBody = + error.parsed_body().unwrap(); + assert!( + error_body.message.contains("pool") + || error_body.message.contains("multicast"), + "Error should indicate no pool available, got: {}", + error_body.message + ); +} diff --git a/nexus/tests/integration_tests/multicast/cache_invalidation.rs b/nexus/tests/integration_tests/multicast/cache_invalidation.rs index 7e028239bc5..da5a04d1b02 100644 --- a/nexus/tests/integration_tests/multicast/cache_invalidation.rs +++ b/nexus/tests/integration_tests/multicast/cache_invalidation.rs @@ -2,9 +2,14 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Integration test for multicast reconciler cache invalidation. - -use std::net::IpAddr; +//! Integration tests for multicast reconciler cache invalidation. +//! +//! Tests inventory and backplane caches used by the multicast reconciler: +//! +//! - Sled move detection: When a sled moves to a different switch port, the +//! reconciler detects this via inventory and updates DPD port mappings +//! - Cache TTL refresh: Verifies caches are refreshed when TTL expires +//! 
- Backplane cache expiry: Tests that stale backplane mappings are cleaned up use gateway_client::types::{PowerState, RotState, SpState}; use nexus_db_queries::context::OpContext; @@ -14,9 +19,10 @@ use nexus_test_utils::resource_helpers::{ }; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::SledFilter; -use nexus_types::external_api::params::MulticastGroupCreate; +use nexus_types::external_api::params::MulticastGroupMemberAdd; +use nexus_types::external_api::views::MulticastGroupMember; use nexus_types::inventory::SpType; -use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::NameOrId; use omicron_nexus::Server; use omicron_nexus::TestInterfaces; use omicron_uuid_kinds::{GenericUuid, InstanceUuid, MulticastGroupUuid}; @@ -47,41 +53,40 @@ async fn test_sled_move_updates_multicast_port_mapping( let log = &cptestctx.logctx.log; let opctx = OpContext::for_tests(log.clone(), datastore.clone()); - // Create project and multicast IP pool - create_default_ip_pool(client).await; - create_project(client, PROJECT_NAME).await; - let pool = create_multicast_ip_pool(client, "sled-move-pool").await; - - // Create multicast group - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: GROUP_NAME.parse().unwrap(), - description: "Group for sled move test".to_string(), - }, - multicast_ip: Some("224.0.1.200".parse::().unwrap()), - source_ips: None, - pool: Some(omicron_common::api::external::NameOrId::Name( - pool.identity.name.clone(), - )), - mvlan: None, - }; - - object_create::<_, nexus_types::external_api::views::MulticastGroup>( - client, - &super::mcast_groups_url(), - ¶ms, + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(client, "sled-move-pool"), ) .await; - // Create instance and attach to multicast group + // Create instance (no multicast groups at 
creation - implicit model) let instance = instance_for_multicast_groups( cptestctx, PROJECT_NAME, INSTANCE_NAME, true, - &[GROUP_NAME], + &[], + ) + .await; + + // Add instance to multicast group + let member_add_url = format!( + "{}?project={PROJECT_NAME}", + mcast_group_members_url(GROUP_NAME) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance.identity.id), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, ) .await; + wait_for_group_active(client, GROUP_NAME).await; let instance_uuid = InstanceUuid::from_untyped_uuid(instance.identity.id); @@ -97,7 +102,7 @@ async fn test_sled_move_updates_multicast_port_mapping( // Verify initial port mapping (based on current inventory `sp_slot`) verify_inventory_based_port_mapping(cptestctx, &instance_uuid) .await - .expect("initial port mapping verification"); + .expect("Should verify initial port mapping"); // Assert that the member is in Joined state let members_before = list_multicast_group_members(client, GROUP_NAME).await; @@ -111,33 +116,33 @@ async fn test_sled_move_updates_multicast_port_mapping( let sled_id = nexus .active_instance_info(&instance_uuid, None) .await - .expect("active_instance_info call succeeds") - .expect("instance should be on a sled") + .expect("Active instance info should be available") + .expect("Instance should be on a sled") .sled_id; // Get sled baseboard information let sleds = datastore .sled_list_all_batched(&opctx, SledFilter::InService) .await - .expect("list in-service sleds"); + .expect("Should list in-service sleds"); let sled = sleds .into_iter() .find(|s| s.id() == sled_id) - .expect("found sled in database"); + .expect("Should find sled in database"); // Get current inventory to see the original sp_slot let original_inventory = datastore .inventory_get_latest_collection(&opctx) .await - .expect("fetch latest inventory collection") - .expect("inventory collection should exist"); + 
.expect("Should fetch latest inventory collection") + .expect("Inventory collection should exist"); let original_sp = original_inventory .sps .iter() .find(|(bb, _)| bb.serial_number == sled.serial_number()) .map(|(_, sp)| sp) - .expect("found SP for sled in original inventory"); + .expect("Should find SP for sled in original inventory"); let original_slot = original_sp.sp_slot; let sled_serial = sled.serial_number().to_string(); @@ -147,13 +152,16 @@ async fn test_sled_move_updates_multicast_port_mapping( let dpd = nexus_test_utils::dpd_client(cptestctx); let original_port_id = dpd_client::types::PortId::Rear( dpd_client::types::Rear::try_from(format!("rear{original_slot}")) - .expect("valid rear port string"), + .expect("Should be valid rear port string"), ); // Determine a valid target slot by querying DPD's backplane map. // Prefer a different slot if available; otherwise fall back to the same. - let backplane = - dpd.backplane_map().await.expect("fetch backplane map").into_inner(); + let backplane = dpd + .backplane_map() + .await + .expect("Should fetch backplane map") + .into_inner(); let mut valid_slots: Vec = backplane .keys() .filter_map(|k| { @@ -193,7 +201,7 @@ async fn test_sled_move_updates_multicast_port_mapping( datastore .inventory_insert_collection(&opctx, &new_collection) .await - .expect("insert new inventory collection"); + .expect("Should insert new inventory collection"); // Invalidate multicast caches to force refresh from new inventory nexus.invalidate_multicast_caches(); @@ -206,7 +214,7 @@ async fn test_sled_move_updates_multicast_port_mapping( // on rear{`sp_slot`}, so it will verify the new mapping is right verify_inventory_based_port_mapping(cptestctx, &instance_uuid) .await - .expect("port mapping should be updated after cache invalidation"); + .expect("Port mapping should be updated after cache invalidation"); // Assert that the member is still in "Joined" state after the move let members_after = list_multicast_group_members(client, 
GROUP_NAME).await; @@ -222,12 +230,12 @@ async fn test_sled_move_updates_multicast_port_mapping( // Verify stale port cleanup: fetch DPD state and ensure old port was removed let members = datastore - .multicast_group_members_list_by_instance(&opctx, instance_uuid, false) + .multicast_group_members_list_by_instance(&opctx, instance_uuid) .await - .expect("list multicast members for instance"); + .expect("Should list multicast members for instance"); let member = members .first() - .expect("instance should have at least one multicast membership"); + .expect("Instance should have at least one multicast membership"); let external_group = datastore .multicast_group_fetch( @@ -235,21 +243,21 @@ async fn test_sled_move_updates_multicast_port_mapping( MulticastGroupUuid::from_untyped_uuid(member.external_group_id), ) .await - .expect("fetch external multicast group"); + .expect("Should fetch external multicast group"); let underlay_group_id = external_group .underlay_group_id - .expect("external group should have underlay_group_id"); + .expect("External group should have underlay_group_id"); let underlay_group = datastore .underlay_multicast_group_fetch(&opctx, underlay_group_id) .await - .expect("fetch underlay multicast group"); + .expect("Should fetch underlay multicast group"); let dpd_client = nexus_test_utils::dpd_client(cptestctx); let underlay_group_response = dpd_client .multicast_group_get(&underlay_group.multicast_ip.ip()) .await - .expect("DPD multicast_group_get succeeds") + .expect("DPD multicast_group_get should succeed") .into_inner(); let dpd_members = match underlay_group_response { @@ -275,12 +283,12 @@ async fn test_sled_move_updates_multicast_port_mapping( /// Test that cache TTL expiry automatically refreshes sled-to-port mappings: /// -/// - Start test server with sled_cache_ttl = 2 seconds +/// - Start test server with sled_cache_ttl = 1 second /// - Create multicast group and instance, wait for member to join /// - Insert new inventory with 
different `sp_slot` (simulating sled move) -/// - Wait for TTL to expire (sleep 3 seconds) +/// - Wait for TTL to expire (sleep 1.5 seconds) /// - Activate reconciler (which should refresh cache due to TTL) -/// - Verify DPD uses the new rear port +/// - Verify DPD uses the new rear port #[tokio::test] async fn test_cache_ttl_driven_refresh() { const PROJECT_NAME: &str = "ttl-test-project"; @@ -318,42 +326,41 @@ async fn test_cache_ttl_driven_refresh() { let client = &cptestctx.external_client; - // Create project and multicast IP pool - create_default_ip_pool(client).await; - create_project(client, PROJECT_NAME).await; - let pool = create_multicast_ip_pool(client, "ttl-test-pool").await; - - // Create multicast group - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: GROUP_NAME.parse().unwrap(), - description: "Group for TTL refresh test".to_string(), - }, - multicast_ip: Some("224.0.1.210".parse::().unwrap()), - source_ips: None, - pool: Some(omicron_common::api::external::NameOrId::Name( - pool.identity.name.clone(), - )), - mvlan: None, - }; - - object_create::<_, nexus_types::external_api::views::MulticastGroup>( - client, - &super::mcast_groups_url(), - ¶ms, + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(client, "ttl-test-pool"), ) .await; - // Create instance and attach to multicast group + // Create instance (no multicast groups at creation - implicit model) let instance = instance_for_multicast_groups( &cptestctx, PROJECT_NAME, INSTANCE_NAME, true, - &[GROUP_NAME], + &[], ) .await; + // Add instance to multicast group + let member_add_url = format!( + "{}?project={PROJECT_NAME}", + mcast_group_members_url(GROUP_NAME) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance.identity.id), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + 
&member_add_url, + &member_params, + ) + .await; + wait_for_group_active(client, GROUP_NAME).await; + let instance_uuid = InstanceUuid::from_untyped_uuid(instance.identity.id); // Wait for member to join @@ -368,39 +375,39 @@ async fn test_cache_ttl_driven_refresh() { // Verify initial port mapping (this populates the cache) verify_inventory_based_port_mapping(&cptestctx, &instance_uuid) .await - .expect("initial port mapping verification"); + .expect("Should verify initial port mapping"); // Get the sled this instance is running on let sled_id = nexus .active_instance_info(&instance_uuid, None) .await - .expect("active_instance_info call succeeds") - .expect("instance should be on a sled") + .expect("Active instance info should be available") + .expect("Instance should be on a sled") .sled_id; // Get sled baseboard information let sleds = datastore .sled_list_all_batched(&opctx, SledFilter::InService) .await - .expect("list in-service sleds"); + .expect("Should list in-service sleds"); let sled = sleds .into_iter() .find(|s| s.id() == sled_id) - .expect("found sled in database"); + .expect("Should find sled in database"); // Get current inventory to see the original sp_slot let original_inventory = datastore .inventory_get_latest_collection(&opctx) .await - .expect("fetch latest inventory collection") - .expect("inventory collection should exist"); + .expect("Should fetch latest inventory collection") + .expect("Inventory collection should exist"); let original_sp = original_inventory .sps .iter() .find(|(bb, _)| bb.serial_number == sled.serial_number()) .map(|(_, sp)| sp) - .expect("found SP for sled in original inventory"); + .expect("Should find SP for sled in original inventory"); let original_slot = original_sp.sp_slot; let sled_serial = sled.serial_number().to_string(); @@ -409,8 +416,11 @@ async fn test_cache_ttl_driven_refresh() { // Determine a valid target slot by querying DPD's backplane map. 
// Prefer a different slot if available; otherwise fall back to the same. let dpd = nexus_test_utils::dpd_client(&cptestctx); - let backplane = - dpd.backplane_map().await.expect("fetch backplane map").into_inner(); + let backplane = dpd + .backplane_map() + .await + .expect("Should fetch backplane map") + .into_inner(); let mut valid_slots: Vec = backplane .keys() .filter_map(|k| { @@ -451,11 +461,11 @@ async fn test_cache_ttl_driven_refresh() { datastore .inventory_insert_collection(&opctx, &new_collection) .await - .expect("insert new inventory collection"); + .expect("Should insert new inventory collection"); - // Wait for cache TTL to expire (sled_cache_ttl = 2 seconds) - // Sleep for 3 seconds to ensure TTL has expired - tokio::time::sleep(std::time::Duration::from_secs(3)).await; + // Wait for cache TTL to expire (sled_cache_ttl = 1 second) + // Sleep for 1.5 seconds to ensure TTL has expired + tokio::time::sleep(std::time::Duration::from_millis(1500)).await; wait_for_condition_with_reconciler( &cptestctx.lockstep_client, @@ -479,7 +489,7 @@ async fn test_cache_ttl_driven_refresh() { &MULTICAST_OPERATION_TIMEOUT, ) .await - .expect("DPD should be updated with new rear port after TTL expiry"); + .expect("DPD should update with new rear port after TTL expiry"); cptestctx.teardown().await; } @@ -529,41 +539,40 @@ async fn test_backplane_cache_ttl_expiry() { let client = &cptestctx.external_client; - // Create project and multicast IP pool - create_default_ip_pool(client).await; - create_project(client, PROJECT_NAME).await; - let pool = create_multicast_ip_pool(client, "backplane-ttl-pool").await; - - // Create multicast group - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: GROUP_NAME.parse().unwrap(), - description: "Group for backplane TTL test".to_string(), - }, - multicast_ip: Some("224.0.1.230".parse::().unwrap()), - source_ips: None, - pool: Some(omicron_common::api::external::NameOrId::Name( - 
pool.identity.name.clone(), - )), - mvlan: None, - }; - - object_create::<_, nexus_types::external_api::views::MulticastGroup>( - client, - &super::mcast_groups_url(), - ¶ms, + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(client, "backplane-ttl-pool"), ) .await; - // Create instance and attach to multicast group + // Create instance (no multicast groups at creation - implicit model) let instance = instance_for_multicast_groups( &cptestctx, PROJECT_NAME, INSTANCE_NAME, true, - &[GROUP_NAME], + &[], + ) + .await; + + // Add instance to multicast group + let member_add_url = format!( + "{}?project={PROJECT_NAME}", + mcast_group_members_url(GROUP_NAME) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Id(instance.identity.id), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, ) .await; + wait_for_group_active(client, GROUP_NAME).await; let instance_uuid = InstanceUuid::from_untyped_uuid(instance.identity.id); @@ -579,11 +588,11 @@ async fn test_backplane_cache_ttl_expiry() { // Verify initial port mapping (confirms both caches are populated) verify_inventory_based_port_mapping(&cptestctx, &instance_uuid) .await - .expect("initial port mapping verification"); + .expect("Should verify initial port mapping"); - // Wait for backplane cache TTL to expire (1 second) but not sled cache (10 seconds) - // Sleep for 2 seconds to ensure backplane TTL has expired - tokio::time::sleep(std::time::Duration::from_secs(2)).await; + // Wait for backplane cache TTL to expire (500ms) but not sled cache (5 seconds) + // Sleep for 1 second to ensure backplane TTL has expired + tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Force cache access by triggering reconciler // This will cause the reconciler to check backplane cache, find it expired, @@ -593,7 +602,7 @@ 
async fn test_backplane_cache_ttl_expiry() { // Verify member is still on the right port after backplane cache refresh verify_inventory_based_port_mapping(&cptestctx, &instance_uuid) .await - .expect("port mapping after backplane cache TTL expiry"); + .expect("Port mapping should work after backplane cache TTL expiry"); // Verify member is still in "Joined" state let members = list_multicast_group_members(client, GROUP_NAME).await; diff --git a/nexus/tests/integration_tests/multicast/enablement.rs b/nexus/tests/integration_tests/multicast/enablement.rs index b172a0b8753..c7e90630b4a 100644 --- a/nexus/tests/integration_tests/multicast/enablement.rs +++ b/nexus/tests/integration_tests/multicast/enablement.rs @@ -6,16 +6,10 @@ //! //! TODO: Remove once we have full multicast support in PROD. -use std::net::IpAddr; - use nexus_test_utils::resource_helpers::{ - create_default_ip_pool, create_project, object_create, object_get, -}; -use nexus_types::external_api::params::MulticastGroupCreate; -use nexus_types::external_api::views::MulticastGroup; -use omicron_common::api::external::{ - IdentityMetadataCreateParams, Instance, InstanceState, NameOrId, + create_default_ip_pool, create_project, object_get, }; +use omicron_common::api::external::{Instance, InstanceState}; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use super::*; @@ -43,25 +37,16 @@ async fn test_multicast_enablement() { let client = &cptestctx.external_client; - // Set up project and multicast infrastructure - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let _pool = create_multicast_ip_pool(client, "test-pool").await; - - // Create a multicast group - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: GROUP_NAME.parse().unwrap(), - description: "Test group for enablement testing".to_string(), - }, - multicast_ip: Some("224.0.1.100".parse::().unwrap()), - source_ips: None, - pool: 
Some(NameOrId::Name("test-pool".parse().unwrap())), - mvlan: None, - }; - - let group_url = "/v1/multicast-groups".to_string(); - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(client, "test-pool"), + ) + .await; + + // Note: With the implicit creation model, groups are created when first member joins. + // When multicast is disabled, instance create should not implicitly create groups/members. // Create instance with multicast groups specified // This should succeed even with multicast disabled @@ -77,13 +62,21 @@ async fn test_multicast_enablement() { // Verify instance was created successfully assert_eq!(instance.identity.name, "test-instance-lifecycle"); - // Verify NO multicast members were created (since multicast is disabled) - let members = list_multicast_group_members(client, GROUP_NAME).await; - assert_eq!( - members.len(), - 0, - "No multicast members should be created when disabled" - ); + // Verify the group doesn't exist at all (since multicast is disabled and no members were created) + // With implicit creation, groups only exist when they have members + let group_url = mcast_group_url(GROUP_NAME); + nexus_test_utils::http_testing::NexusRequest::new( + nexus_test_utils::http_testing::RequestBuilder::new( + client, + http::Method::GET, + &group_url, + ) + .expect_status(Some(http::StatusCode::NOT_FOUND)), + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should get 404 response"); // Start the instance - this should also succeed let start_url = format!( @@ -116,13 +109,20 @@ async fn test_multicast_enablement() { ) .await; - // Still no multicast members should exist - let members = list_multicast_group_members(client, GROUP_NAME).await; - assert_eq!( - members.len(), - 0, - "No multicast 
members should be created during start when disabled" - ); + // Verify the group still doesn't exist (multicast disabled, no members created) + let group_url_after_start = mcast_group_url(GROUP_NAME); + nexus_test_utils::http_testing::NexusRequest::new( + nexus_test_utils::http_testing::RequestBuilder::new( + client, + http::Method::GET, + &group_url_after_start, + ) + .expect_status(Some(http::StatusCode::NOT_FOUND)), + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should get 404 response after instance start"); // Stop the instance - this should also succeed let stop_url = format!( @@ -155,13 +155,20 @@ async fn test_multicast_enablement() { ) .await; - // Still no multicast members should exist - let members = list_multicast_group_members(client, GROUP_NAME).await; - assert_eq!( - members.len(), - 0, - "No multicast members should be created during stop when disabled" - ); + // Verify the group still doesn't exist (multicast disabled, no members created) + let group_url_after_stop = mcast_group_url(GROUP_NAME); + nexus_test_utils::http_testing::NexusRequest::new( + nexus_test_utils::http_testing::RequestBuilder::new( + client, + http::Method::GET, + &group_url_after_stop, + ) + .expect_status(Some(http::StatusCode::NOT_FOUND)), + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should get 404 response after instance stop"); // Wait for instance to be fully stopped before attempting deletion let get_url = @@ -179,13 +186,20 @@ async fn test_multicast_enablement() { nexus_test_utils::resource_helpers::object_delete(client, &delete_url) .await; - // Verify no multicast state was ever created - let members = list_multicast_group_members(client, GROUP_NAME).await; - assert_eq!( - members.len(), - 0, - "No multicast members should exist after instance deletion when disabled" - ); + // Verify no multicast state was ever created (group still doesn't 
exist) + let group_url_after_delete = mcast_group_url(GROUP_NAME); + nexus_test_utils::http_testing::NexusRequest::new( + nexus_test_utils::http_testing::RequestBuilder::new( + client, + http::Method::GET, + &group_url_after_delete, + ) + .expect_status(Some(http::StatusCode::NOT_FOUND)), + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should get 404 response after instance deletion"); // Test API-level group attachment when disabled @@ -199,31 +213,34 @@ async fn test_multicast_enablement() { ) .await; - // Try to attach to multicast group via API - should succeed + // Try to attach to multicast group via API + // When multicast is disabled, this should fail let attach_url = format!( "/v1/instances/test-instance-api/multicast-groups/{GROUP_NAME}?project={PROJECT_NAME}" ); - nexus_test_utils::http_testing::NexusRequest::new( + let attach_body = serde_json::json!({}); + let attach_response = nexus_test_utils::http_testing::NexusRequest::new( nexus_test_utils::http_testing::RequestBuilder::new( client, http::Method::PUT, &attach_url, ) - .expect_status(Some(http::StatusCode::CREATED)), + .body(Some(&attach_body)) + .expect_status(Some(http::StatusCode::BAD_REQUEST)), ) .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) .execute() .await - .expect("Multicast group attach should succeed even when disabled"); - - // Verify that direct API calls DO create member records even when disabled - // (This is correct behavior for experimental APIs - they handle config management) - let members = list_multicast_group_members(client, GROUP_NAME).await; - assert_eq!( - members.len(), - 1, - "Direct API calls should create member records even when disabled (experimental API behavior)" + .expect("Should get response from attach attempt"); + + // Verify the error message indicates multicast is disabled + let error: dropshot::HttpErrorResponseBody = + attach_response.parsed_body().expect("Should parse error 
body"); + assert!( + error.message.contains("multicast functionality is currently disabled"), + "Error message should indicate multicast is disabled, got: {}", + error.message ); cptestctx.teardown().await; diff --git a/nexus/tests/integration_tests/multicast/failures.rs b/nexus/tests/integration_tests/multicast/failures.rs index a47b4b01991..a15914725df 100644 --- a/nexus/tests/integration_tests/multicast/failures.rs +++ b/nexus/tests/integration_tests/multicast/failures.rs @@ -4,27 +4,40 @@ // // Copyright 2025 Oxide Computer Company -//! Integration tests for multicast group failure scenarios. +//! Integration tests for multicast group failure and recovery scenarios. //! -//! Tests DPD communication failures, reconciler resilience, and saga rollback -//! scenarios. +//! Tests resilience and error handling: +//! +//! - DPD communication failures: Recovery when switch is unavailable +//! - State consistency: Reconciler validates DB state matches DPD state +//! - DPD failures during state transitions: "Creating", "Active", "Deleting" states +//! - Implicit creation/deletion with DPD failures +//! - Concurrent operation races: Implicit creation, deletion with member add +//! - Drift correction: Reconciler syncs DPD when state is lost +//! 
- Member lifecycle: "Joining"→"Left" on instance stop, "Left" waits for "Active" -use std::net::{IpAddr, Ipv4Addr}; +use http::{Method, StatusCode}; +use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ - create_default_ip_pool, create_instance, create_project, object_create, - object_delete, object_get, objects_list_page_authz, + create_default_ip_pool, create_instance, create_instance_with, + create_project, object_create, object_create_error, object_get, + objects_list_page_authz, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ - MulticastGroupCreate, MulticastGroupMemberAdd, + ExternalIpCreate, InstanceDiskAttachment, + InstanceNetworkInterfaceAttachment, MulticastGroupIdentifier, + MulticastGroupMemberAdd, }; use nexus_types::external_api::views::{MulticastGroup, MulticastGroupMember}; -use omicron_common::api::external::{ - IdentityMetadataCreateParams, NameOrId, SwitchLocation, -}; +use omicron_common::api::external::{InstanceState, NameOrId, SwitchLocation}; +use omicron_uuid_kinds::InstanceUuid; use super::*; +use crate::integration_tests::instances::{ + instance_simulate, instance_wait_for_state, +}; #[nexus_test] async fn test_multicast_group_dpd_communication_failure_recovery( @@ -35,49 +48,28 @@ async fn test_multicast_group_dpd_communication_failure_recovery( let group_name = "dpd-failure-group"; let instance_name = "dpd-failure-instance"; - // Setup: project, pools, group with member - parallelize creation - let (_, _, mcast_pool) = ops::join3( + // Setup: project, pools - parallelize creation + let (_, _, _) = ops::join3( create_project(&client, project_name), create_default_ip_pool(&client), create_multicast_ip_pool(&client, "mcast-pool"), ) .await; - // Create group that will experience DPD communication failure - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 250)); - let group_url = "/v1/multicast-groups".to_string(); - let params = 
MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for DPD communication failure test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; + // Create instance first + create_instance(client, project_name, instance_name).await; - // Stop DPD BEFORE reconciler runs to test failure recovery + // Stop DPD before implicit creation to test failure recovery cptestctx.stop_dendrite(SwitchLocation::Switch0).await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - // Group should start in "Creating" state - assert_eq!( - created_group.state, "Creating", - "New multicast group should start in Creating state" + // Add member to group + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}", ); - - // Add member to make group programmable - create_instance(client, project_name, instance_name).await; let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - let member_add_url = mcast_group_member_add_url( - group_name, - &member_params.instance, - project_name, - ); object_create::<_, MulticastGroupMember>( client, &member_add_url, @@ -85,7 +77,7 @@ async fn test_multicast_group_dpd_communication_failure_recovery( ) .await; - // Verify group remains in "Creating" state since DPD is unavailable + // Verify group was implicitly created and is in "Creating" state since DPD is unavailable // The reconciler can't progress the group to Active without DPD communication let group_get_url = mcast_group_url(group_name); let fetched_group: MulticastGroup = @@ -98,10 +90,18 @@ async fn test_multicast_group_dpd_communication_failure_recovery( ); // Verify group properties are maintained despite DPD issues - // The group should remain accessible and 
in "Creating" state since DPD is down - assert_eq!(fetched_group.identity.name, group_name); - assert_eq!(fetched_group.multicast_ip, multicast_ip); - assert_eq!(fetched_group.identity.id, created_group.identity.id); + assert_eq!(fetched_group.identity.name.as_str(), group_name); + + // Case: Verify member state during DPD failure + // Members should be in "Joining" or "Left" state when DPD is unavailable + // (they can't transition to "Joined" without successful DPD programming) + let members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1, "Should have exactly one member"); + assert!( + members[0].state == "Joining" || members[0].state == "Left", + "Member should be in Joining or Left state when DPD is unavailable, got: {}", + members[0].state + ); } #[nexus_test] @@ -111,42 +111,19 @@ async fn test_multicast_reconciler_state_consistency_validation( let client = &cptestctx.external_client; let project_name = "test-project"; - // Create multiple groups to test reconciler batch processing with failures - let (_, _, mcast_pool) = ops::join3( + // Setup: project and pools + let (_, _, _) = ops::join3( create_project(&client, project_name), create_default_ip_pool(&client), create_multicast_ip_pool(&client, "mcast-pool"), ) .await; - // Stop DPD BEFORE reconciler runs to test failure recovery - cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + // Group names for implicit groups (implicitly created when first member joins) + let group_names = + ["consistency-group-1", "consistency-group-2", "consistency-group-3"]; - // Create groups that will test different failure scenarios using helper functions - let group_specs = &[ - MulticastGroupForTest { - name: "consistency-group-1", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 220)), - description: Some("Group for state consistency test".to_string()), - }, - MulticastGroupForTest { - name: "consistency-group-2", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 221)), - 
description: Some("Group for state consistency test".to_string()), - }, - MulticastGroupForTest { - name: "consistency-group-3", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 222)), - description: Some("Group for state consistency test".to_string()), - }, - ]; - - // Create all groups rapidly to stress test reconciler - let created_groups = - create_multicast_groups(client, &mcast_pool, group_specs).await; - let group_names: Vec<&str> = group_specs.iter().map(|g| g.name).collect(); - - // Create instances and attach to groups in parallel (now that double-delete bug is fixed) + // Create instances first (groups will be implicitly created when members attach) let instance_names: Vec<_> = group_names .iter() .map(|&group_name| format!("instance-{group_name}")) @@ -158,41 +135,77 @@ async fn test_multicast_reconciler_state_consistency_validation( }); ops::join_all(create_futures).await; - // Attach instances to their respective groups in parallel - let attach_futures = instance_names.iter().zip(&group_names).map( - |(instance_name, &group_name)| { - multicast_group_attach( - cptestctx, - project_name, - instance_name, - group_name, - ) - }, - ); - ops::join_all(attach_futures).await; + // Stop DPD before attaching members to test failure recovery + // Groups will be implicitly created but stay in "Creating" state + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + + // Attach instances to their respective groups (triggers implicit creation for each group) + // Since DPD is down, groups will remain in "Creating" state + for (instance_name, &group_name) in instance_names.iter().zip(&group_names) + { + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}", + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + } + + // Wait for 
reconciler to attempt processing (will fail due to DPD being down) + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; // Verify each group is in a consistent state (DPD failure prevents reconciliation) - for (i, group_name) in group_names.iter().enumerate() { - let original_group = &created_groups[i]; + for group_name in group_names.iter() { let group_get_url = mcast_group_url(group_name); let fetched_group: MulticastGroup = object_get(client, &group_get_url).await; - // Critical consistency checks - assert_eq!(fetched_group.identity.id, original_group.identity.id); - assert_eq!(fetched_group.multicast_ip, original_group.multicast_ip); - - // State should be Creating since all DPD processes were stopped - // The reconciler cannot activate groups without DPD communication + // State should be "Creating" since DPD is down assert_eq!( fetched_group.state, "Creating", "Group {group_name} should remain in Creating state when DPD is unavailable, found: {}", fetched_group.state ); + + // Case: Verify member state during DPD failure + let members = list_multicast_group_members(client, group_name).await; + assert_eq!( + members.len(), + 1, + "Group {group_name} should have exactly one member" + ); + assert!( + members[0].state == "Joining" || members[0].state == "Left", + "Member in group {group_name} should be Joining or Left when DPD unavailable, got: {}", + members[0].state + ); } - // Clean up all groups - test reconciler's ability to handle batch deletions - cleanup_multicast_groups(client, &group_names).await; + let instance_name_refs: Vec<&str> = + instance_names.iter().map(|s| s.as_str()).collect(); + cleanup_instances(cptestctx, client, project_name, &instance_name_refs) + .await; + + // With DPD down, groups cannot complete state transitions - they may be stuck + // in "Creating" (never reached "Active") or "Deleting" state. 
+ wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Verify groups are either deleted or stuck in "Creating"/"Deleting" state + for group_name in group_names.iter() { + verify_group_deleted_or_in_states( + client, + group_name, + &["Creating", "Deleting"], + ) + .await; + } } #[nexus_test] @@ -204,48 +217,27 @@ async fn test_dpd_failure_during_creating_state( let group_name = "creating-dpd-fail-group"; let instance_name = "creating-fail-instance"; - // Setup: project, pools, group with member - parallelize creation - let (_, _, mcast_pool) = ops::join3( + // Setup: project, pools + let (_, _, _) = ops::join3( create_project(&client, project_name), create_default_ip_pool(&client), create_multicast_ip_pool(&client, "mcast-pool"), ) .await; - // Create group (IP within pool range 224.0.1.10 to 224.0.1.255) - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 210)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for DPD failure during Creating state test" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; + // Create instance first + create_instance(client, project_name, instance_name).await; - // Stop DPD before object creation of groups. 
+ // Stop DPD before implicit creation cptestctx.stop_dendrite(SwitchLocation::Switch0).await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - // Group should start in "Creating" state - assert_eq!( - created_group.state, "Creating", - "New multicast group should start in Creating state" - ); - - // Add member to make group programmable - create_instance(client, project_name, instance_name).await; - + // Add member to group let member_add_url = format!( "/v1/multicast-groups/{group_name}/members?project={project_name}" ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( client, @@ -254,8 +246,6 @@ async fn test_dpd_failure_during_creating_state( ) .await; - // Stop DPD process BEFORE reconciler runs to test Creating→Creating failure - // Wait for reconciler to process - tests DPD communication handling during "Creating" state wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; @@ -264,8 +254,7 @@ async fn test_dpd_failure_during_creating_state( let fetched_group: MulticastGroup = object_get(client, &group_get_url).await; - // Critical assertion: Group should remain in "Creating" state since DPD is unavailable - // The reconciler cannot transition Creating→Active without DPD communication + // Group should remain in "Creating" state since DPD is down assert_eq!( fetched_group.state, "Creating", "Group should remain in Creating state when DPD is unavailable during activation, found: {}", @@ -273,12 +262,20 @@ async fn test_dpd_failure_during_creating_state( ); // Verify group properties are maintained - assert_eq!(fetched_group.identity.name, group_name); - assert_eq!(fetched_group.multicast_ip, multicast_ip); - assert_eq!(fetched_group.identity.id, created_group.identity.id); + assert_eq!(fetched_group.identity.name.as_str(), group_name); + + // Case: Verify member state during DPD failure + let 
members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1, "Should have exactly one member"); + assert!( + members[0].state == "Joining" || members[0].state == "Left", + "Member should be Joining or Left when DPD unavailable during Creating state, got: {}", + members[0].state + ); - // Test cleanup - should work regardless of DPD state - object_delete(client, &group_get_url).await; + // Test cleanup - remove member, which triggers implicit deletion + multicast_group_detach(client, project_name, instance_name, group_name) + .await; wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; } @@ -292,41 +289,24 @@ async fn test_dpd_failure_during_active_state( let group_name = "active-dpd-fail-group"; let instance_name = "active-fail-instance"; - // Setup: project, pools, group with member - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; - - let mcast_pool = create_multicast_ip_pool(&client, "mcast-pool").await; - - // Create group that will become active first - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 211)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for DPD failure during Active state test" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - assert_eq!(created_group.state, "Creating"); + let instance = create_instance(client, project_name, instance_name).await; - // Add member to make group programmable - 
create_instance(client, project_name, instance_name).await; + // Add member to group + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - let member_add_url = mcast_group_member_add_url( - group_name, - &member_params.instance, - project_name, - ); object_create::<_, MulticastGroupMember>( client, &member_add_url, @@ -334,48 +314,54 @@ async fn test_dpd_failure_during_active_state( ) .await; - // First, let the group activate normally with DPD running - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + // Wait for group to become "Active" and member to reach "Joined" state + wait_for_group_active(client, group_name).await; + wait_for_member_state( + cptestctx, + group_name, + instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Joined, + ) + .await; - // Verify group is now Active (or at least not Creating anymore) + // Verify group is now "Active" let group_get_url = mcast_group_url(group_name); let active_group: MulticastGroup = object_get(client, &group_get_url).await; + assert_eq!(active_group.state, "Active"); - // Group should be Active or at least no longer Creating - assert!( - active_group.state == "Active" || active_group.state == "Creating", - "Group should be Active or Creating before DPD failure test, found: {}", - active_group.state - ); + // Now stop DPD while group is "Active" and member is "Joined" + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; - // Only proceed with failure test if group successfully activated - if active_group.state == "Active" { - // Now stop DPD while group is "Active" to test "Active" state resilience - cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + // Wait for reconciler to process - tests DPD communication handling during "Active" state + 
wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - // Wait for reconciler to process - tests DPD communication handling during "Active" state - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + // Check group state after reconciler processes with DPD unavailable + let fetched_group: MulticastGroup = + object_get(client, &group_get_url).await; - // Check group state after reconciler processes with DPD unavailable - let fetched_group: MulticastGroup = - object_get(client, &group_get_url).await; + // Group should remain "Active" - existing "Active" groups shouldn't change state due to DPD failures + assert_eq!( + fetched_group.state, "Active", + "Active group should remain Active despite DPD communication failure, found: {}", + fetched_group.state + ); - // Group should remain "Active" - existing "Active" groups shouldn't change state due to DPD failures - // The reconciler should handle temporary DPD communication issues gracefully - assert_eq!( - fetched_group.state, "Active", - "Active group should remain Active despite DPD communication failure, found: {}", - fetched_group.state - ); + // Verify group properties are maintained + assert_eq!(fetched_group.identity.name.as_str(), group_name); - // Verify group properties are maintained - assert_eq!(fetched_group.identity.name, group_name); - assert_eq!(fetched_group.multicast_ip, multicast_ip); - assert_eq!(fetched_group.identity.id, created_group.identity.id); - } + // Case: Verify member state persists during DPD failure for Active groups + // Members that were already "Joined" should remain "Joined" even when DPD is unavailable + let members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1, "Should have exactly one member"); + assert_eq!( + members[0].state, "Joined", + "Member should remain Joined when DPD fails after group reached Active state, got: {}", + members[0].state + ); - // Test cleanup - should work regardless of DPD state - 
object_delete(client, &group_get_url).await; + // Test cleanup - remove member, which triggers implicit deletion + multicast_group_detach(client, project_name, instance_name, group_name) + .await; // Wait for reconciler to process the deletion wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; @@ -390,38 +376,24 @@ async fn test_dpd_failure_during_deleting_state( let group_name = "deleting-dpd-fail-group"; let instance_name = "deleting-fail-instance"; - // Setup: project, pools, group with member - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; - - let mcast_pool = create_multicast_ip_pool(&client, "mcast-pool").await; - - // Create group that we'll delete while DPD is down - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 212)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for DPD failure during Deleting state test" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - assert_eq!(created_group.state, "Creating"); + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; - // Add member and let group activate + // Create instance first create_instance(client, project_name, instance_name).await; + + // Add member to group let member_add_url = format!( "/v1/multicast-groups/{group_name}/members?project={project_name}" ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( client, @@ 
-433,13 +405,13 @@ async fn test_dpd_failure_during_deleting_state( // Wait for group to reach "Active" state before testing deletion wait_for_group_active(client, group_name).await; - // Now delete the group to put it in "Deleting" state - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; - - // Stop DPD AFTER deletion but BEFORE reconciler processes deletion + // Stop DPD before triggering deletion (by removing member) cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + // Remove the member to trigger implicit deletion (group should go to "Deleting" state) + multicast_group_detach(client, project_name, instance_name, group_name) + .await; + // The group should now be in "Deleting" state and DPD is down // Let's check the state before reconciler runs // Group should be accessible via GET request @@ -492,9 +464,7 @@ async fn test_dpd_failure_during_deleting_state( ); // Verify group properties are maintained during failed deletion - assert_eq!(group.identity.name, group_name); - assert_eq!(group.multicast_ip, multicast_ip); - assert_eq!(group.identity.id, created_group.identity.id); + assert_eq!(group.identity.name.as_str(), group_name); } // Note: If group is gone, that means deletion succeeded despite DPD being down, // which would indicate the reconciler has fallback cleanup logic @@ -509,46 +479,28 @@ async fn test_multicast_group_members_during_dpd_failure( let group_name = "member-dpd-fail-group"; let instance_name = "member-test-instance"; - // Setup: project, pools, group with member - parallelize creation - let (_, _, mcast_pool) = ops::join3( + // Setup: project, pools + let (_, _, _) = ops::join3( create_project(&client, project_name), create_default_ip_pool(&client), create_multicast_ip_pool(&client, "mcast-pool"), ) .await; - // Create group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 213)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { 
- identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for member state during DPD failure test" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; + // Create instance first + let instance = create_instance(client, project_name, instance_name).await; // Stop DPD to test member operations during failure cptestctx.stop_dendrite(SwitchLocation::Switch0).await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - assert_eq!(created_group.state, "Creating"); - - // Add member - let instance = create_instance(client, project_name, instance_name).await; - + // Add member to group let member_add_url = format!( "/v1/multicast-groups/{group_name}/members?project={project_name}" ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - object_create::<_, MulticastGroupMember>( client, &member_add_url, @@ -556,7 +508,12 @@ async fn test_multicast_group_members_during_dpd_failure( ) .await; - // Verify member is accessible before DPD failure + // Get the implicitly created group for later verification + let group_get_url = mcast_group_url(group_name); + let created_group: MulticastGroup = + object_get(client, &group_get_url).await; + + // Verify member is accessible let members_url = format!("/v1/multicast-groups/{group_name}/members"); let initial_members = nexus_test_utils::resource_helpers::objects_list_page_authz::< @@ -564,12 +521,7 @@ async fn test_multicast_group_members_during_dpd_failure( >(client, &members_url) .await .items; - assert_eq!( - initial_members.len(), - 1, - "Should have exactly one member before DPD failure" - ); - // Note: Members store instance_id (UUID), not instance name + assert_eq!(initial_members.len(), 1, "Should have exactly one member"); 
assert_eq!(initial_members[0].instance_id, instance.identity.id); // Wait for reconciler - group should remain in "Creating" state @@ -593,8 +545,15 @@ async fn test_multicast_group_members_during_dpd_failure( created_group.identity.id ); + // Case: Verify member state during DPD failure + assert!( + members_during_failure[0].state == "Joining" + || members_during_failure[0].state == "Left", + "Member should be Joining or Left when DPD unavailable, got: {}", + members_during_failure[0].state + ); + // Verify group is still in "Creating" state - let group_get_url = mcast_group_url(group_name); let fetched_group: MulticastGroup = object_get(client, &group_get_url).await; @@ -604,9 +563,797 @@ async fn test_multicast_group_members_during_dpd_failure( fetched_group.state ); - // Clean up - object_delete(client, &group_get_url).await; + multicast_group_detach(client, project_name, instance_name, group_name) + .await; // Wait for reconciler to process the deletion wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; } + +/// Test that implicit creation works correctly when DPD is unavailable. +/// +/// When a member is added to a non-existent group (by name), the system should: +/// 1. Implicitly create the group in "Creating" state +/// 2. Create the member in "Left" state (since instance is stopped) +/// 3. The group should remain in "Creating" state until DPD is available +/// +/// This tests the implicit group lifecycle: groups are implicitly created +/// when the first instance joins. 
+#[nexus_test] +async fn test_implicit_creation_with_dpd_failure( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "implicit-create-dpd-fail-project"; + let group_name = "implicit-created-dpd-fail-group"; + let instance_name = "implicit-create-instance"; + + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "implicit-create-pool"), + ) + .await; + + // Create instance first + let instance = create_instance(client, project_name, instance_name).await; + + // Stop DPD before implicit creation + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + + // Add the instance as a member to a non-existent group (triggers implicit creation) + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Verify the implicitly created group exists and is in "Creating" state + let group_url = mcast_group_url(group_name); + let implicitly_created_group: MulticastGroup = + object_get(client, &group_url).await; + + assert_eq!( + implicitly_created_group.state, "Creating", + "Implicitly created group should start in Creating state" + ); + assert_eq!(implicitly_created_group.identity.name.as_str(), group_name); + + // Wait for reconciler - group should remain in "Creating" since DPD is down + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Verify group is still in "Creating" state + let fetched_group: MulticastGroup = object_get(client, &group_url).await; + assert_eq!( + fetched_group.state, "Creating", + "Implicitly created group should remain in Creating state when DPD is unavailable" 
+ ); + + // Verify member is still attached + let members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1, "Should have one member"); + assert_eq!(members[0].instance_id, instance.identity.id); + + // Case: Verify member state during DPD failure for implicit creation + assert!( + members[0].state == "Joining" || members[0].state == "Left", + "Member should be Joining or Left when DPD unavailable during implicit creation, got: {}", + members[0].state + ); + + multicast_group_detach(client, project_name, instance_name, group_name) + .await; +} + +/// Test that implicit deletion works correctly when DPD is unavailable. +/// +/// When the last member leaves an implicit group, the system should: +/// 1. Mark the group for deletion (transition to "Deleting" state) +/// 2. The group should remain in "Deleting" state until DPD is available +/// +/// This tests the implicit group lifecycle: groups are implicitly deleted +/// when the last instance leaves. +#[nexus_test] +async fn test_implicit_deletion_with_dpd_failure( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "implicit-delete-dpd-fail-project"; + let group_name = "implicit-delete-dpd-fail-group"; + let instance_name = "implicit-delete-instance"; + + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "implicit-delete-pool"), + ) + .await; + + // Create instance first + create_instance(client, project_name, instance_name).await; + + // Add member to group + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + 
.await; + + // Wait for group to become Active + wait_for_group_active(client, group_name).await; + + // Get the group ID for later verification + let group_url = mcast_group_url(group_name); + let created_group: MulticastGroup = object_get(client, &group_url).await; + + // Now stop DPD before removing the last member + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + + // Remove the last member (should trigger implicit deletion) + multicast_group_detach(client, project_name, instance_name, group_name) + .await; + + // Wait for reconciler to process + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // The group should be in "Deleting" state since DPD is unavailable + // List groups to check if it still exists + let groups_result = objects_list_page_authz::( + client, + "/v1/multicast-groups", + ) + .await; + + let remaining_groups: Vec<_> = groups_result + .items + .into_iter() + .filter(|g| g.identity.id == created_group.identity.id) + .collect(); + + if !remaining_groups.is_empty() { + let group = &remaining_groups[0]; + assert_eq!( + group.state, "Deleting", + "Group should be in Deleting state when last member leaves and DPD is unavailable, found: {}", + group.state + ); + assert_eq!(group.identity.id, created_group.identity.id); + } + // Note: If group is gone, implicit deletion succeeded despite DPD being down + // (possibly via database-only cleanup) +} + +/// Test concurrent implicit creation race conditions. +/// +/// When multiple instances try to join a non-existent group simultaneously, +/// only one should create the group and all should become members. +/// This tests that the implicit creation logic handles conflicts correctly. 
+#[nexus_test] +async fn test_concurrent_implicit_creation_race( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "concurrent-implicit-create-project"; + let group_name = "concurrent-implicit-create-group"; + + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "concurrent-implicit-create-pool"), + ) + .await; + + // Create multiple instances (without starting them to avoid saga timing issues) + let instance_names = + ["race-instance-1", "race-instance-2", "race-instance-3"]; + let create_instance_futures = instance_names.iter().map(|name| { + create_instance_with( + client, + project_name, + name, + &InstanceNetworkInterfaceAttachment::Default, + Vec::::new(), + Vec::::new(), + false, // start=false: Don't start instances to avoid timing issues + Default::default(), + None, + Vec::::new(), + ) + }); + let instances = ops::join_all(create_instance_futures).await; + + // Ensure inventory and DPD are ready before adding members to groups + ensure_inventory_ready(cptestctx).await; + ensure_dpd_ready(cptestctx).await; + + // Try to add all instances to the non-existent group concurrently + // This will race to implicitly create the group + let add_member_futures = instance_names.iter().map(|instance_name| { + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + async move { + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await + } + }); + + // Execute all member additions concurrently + let members = ops::join_all(add_member_futures).await; + + // All member additions should have succeeded + assert_eq!(members.len(), 3, "All 3 member additions should 
succeed"); + + // Verify the group exists and has all members + let group_url = mcast_group_url(group_name); + let implicitly_created_group: MulticastGroup = + object_get(client, &group_url).await; + assert_eq!(implicitly_created_group.identity.name.as_str(), group_name); + + // Verify all instances are members + let final_members = list_multicast_group_members(client, group_name).await; + assert_eq!( + final_members.len(), + 3, + "Group should have all 3 members after concurrent implicit creation" + ); + + // Verify each instance is a member + for instance in &instances { + assert!( + final_members.iter().any(|m| m.instance_id == instance.identity.id), + "Instance {} should be a member of the group", + instance.identity.name + ); + } + + cleanup_instances(cptestctx, client, project_name, &instance_names).await; + + // Wait for group to be implicitly deleted (may already be deleted if cleanup succeeded) + wait_for_group_deleted(client, group_name).await; +} + +/// Test implicit deletion race with member add. +/// +/// When the last member is leaving (triggering implicit deletion) while another +/// instance is trying to join, the system should handle this gracefully. +/// Either the group survives with the new member, or implicit deletion completes +/// and the new member triggers a fresh implicit creation. 
+#[nexus_test] +async fn test_implicit_deletion_race_with_member_add( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "delete-race-project"; + let group_name = "delete-race-group"; + + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "delete-race-pool"), + ) + .await; + + // Create instances + let instance_names = + ["leaving-instance", "joining-instance-1", "joining-instance-2"]; + let create_instance_futures = instance_names + .iter() + .map(|name| create_instance(client, project_name, name)); + let instances = ops::join_all(create_instance_futures).await; + + // Add first member + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}", + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("leaving-instance".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Wait for group to become Active + wait_for_group_active(client, group_name).await; + + // Now execute detach and add concurrently + // The leaving instance triggers implicit deletion, while joining instances try to add + let detach_future = multicast_group_detach( + client, + project_name, + "leaving-instance", + group_name, + ); + + let join_futures = ["joining-instance-1", "joining-instance-2"] + .iter() + .map(|instance_name| { + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + async move { + // This might fail if group is deleted, or succeed if it beats the delete + let result = nexus_test_utils::http_testing::NexusRequest::new( + 
nexus_test_utils::http_testing::RequestBuilder::new( + client, + http::Method::POST, + &member_add_url, + ) + .body(Some(&member_params)), + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await; + + match result { + Ok(response) + if response.status == http::StatusCode::CREATED => + { + Some( + response + .parsed_body::() + .unwrap(), + ) + } + _ => None, // Failed to join (group might be deleting) + } + } + }); + + // Execute concurrently + let (_, join_results) = + ops::join2(detach_future, ops::join_all(join_futures)).await; + + // Wait for reconciler to process everything + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Check the final state - group should exist if any join succeeded + let successful_joins: Vec<_> = join_results.into_iter().flatten().collect(); + + if !successful_joins.is_empty() { + // At least one instance joined - group should exist + let group_url = mcast_group_url(group_name); + let final_group_result: Result = + nexus_test_utils::http_testing::NexusRequest::object_get( + client, &group_url, + ) + .authn_as(nexus_test_utils::http_testing::AuthnMode::PrivilegedUser) + .execute() + .await + .map(|r| r.parsed_body().unwrap()); + + match final_group_result { + Ok(_) => { + // Group exists - verify it has the joining instances + let members = + list_multicast_group_members(client, group_name).await; + assert!( + !members.is_empty(), + "Group should have members if it exists" + ); + + // The leaving instance should not be a member + assert!( + !members + .iter() + .any(|m| m.instance_id == instances[0].identity.id), + "Leaving instance should not be a member" + ); + } + Err(_) => { + // Group was deleted - that's also valid if timing worked out + // The joining instances should have gotten errors + } + } + } + + // Cleanup - delete instances; group is implicitly deleted when last member removed + cleanup_instances(cptestctx, client, project_name, &instance_names).await; + + // 
Implicit model: group is implicitly deleted when last member (instance) is removed + // Wait for group to be deleted (may already be deleted if no joins succeeded) + wait_for_group_deleted(client, group_name).await; +} + +/// Test that joining a deleted instance to a multicast group returns NOT_FOUND. +/// +/// This verifies proper error handling when attempting to add an instance that +/// was previously deleted to a multicast group. +#[nexus_test] +async fn test_multicast_join_deleted_instance( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "test-project"; + let group_name = "test-group"; + let instance_to_delete = "instance-to-delete"; + let remaining_instance = "remaining-instance"; + + // Setup: project and pools + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; + + // Create two instances + create_instance(client, project_name, instance_to_delete).await; + create_instance(client, project_name, remaining_instance).await; + + // Create group with the remaining instance (so the group stays alive) + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(remaining_instance.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Wait for group to become active + wait_for_group_active(&client, group_name).await; + + // Delete the first instance using cleanup_instances (handles stop/delete flow) + cleanup_instances(cptestctx, client, project_name, &[instance_to_delete]) + .await; + + // Now try to add the deleted instance to the group - should fail with NOT_FOUND + let member_params_deleted = MulticastGroupMemberAdd { + instance: 
NameOrId::Name(instance_to_delete.parse().unwrap()), + source_ips: None, + }; + object_create_error( + client, + &member_add_url, + &member_params_deleted, + http::StatusCode::NOT_FOUND, + ) + .await; + + // Cleanup + cleanup_instances(cptestctx, client, project_name, &[remaining_instance]) + .await; + wait_for_group_deleted(client, group_name).await; +} + +/// Test drift correction: DPD loses group state and reconciler re-syncs it. +/// +/// This simulates DPD drift where the switch has lost the multicast group +/// information (e.g., after a switch restart). The reconciler should detect +/// the missing state and re-sync DPD to restore the group to "Active" state. +#[nexus_test] +async fn test_drift_correction_missing_group_in_dpd( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "drift-test-project"; + let group_name = "drift-test-group"; + let instance_name = "drift-test-instance"; + + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool(&client, "drift-pool"), + ) + .await; + + // Create instance + create_instance(client, project_name, instance_name).await; + + // Create group by adding member + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + wait_for_group_active(client, group_name).await; + + let group_get_url = mcast_group_url(group_name); + let active_group: MulticastGroup = object_get(client, &group_get_url).await; + assert_eq!(active_group.state, "Active", "Group should be Active"); + + // Get multicast IP for DPD 
queries + let multicast_ip = active_group.multicast_ip; + + // Verify group exists in DPD before restart + let dpd_client = nexus_test_utils::dpd_client(cptestctx); + assert!( + dpd_client.multicast_group_get(&multicast_ip).await.is_ok(), + "Group should exist in DPD before restart" + ); + + // Simulate drift: restart DPD (clears its state) + // This leaves the group "Active" in DB but missing from DPD + cptestctx.restart_dendrite(SwitchLocation::Switch0).await; + + // Verify group is missing from DPD after restart (drift exists) + let dpd_client = nexus_test_utils::dpd_client(cptestctx); + assert!( + dpd_client.multicast_group_get(&multicast_ip).await.is_err(), + "Group should NOT exist in DPD after restart (this is the drift)" + ); + + // Activate reconciler - should detect missing group and re-program DPD + activate_multicast_reconciler(&cptestctx.lockstep_client).await; + wait_for_group_active(client, group_name).await; + + // Verify drift was corrected: group now exists in DPD again + assert!( + dpd_client.multicast_group_get(&multicast_ip).await.is_ok(), + "Group should exist in DPD after drift correction" + ); + + // Verify group is still "Active" in DB + let fetched_group: MulticastGroup = + object_get(client, &group_get_url).await; + assert_eq!( + fetched_group.state, "Active", + "Group should remain Active after drift correction, found: {}", + fetched_group.state + ); + + // Verify group properties maintained + assert_eq!(fetched_group.identity.name.as_str(), group_name); + assert_eq!(fetched_group.identity.id, active_group.identity.id); + + // Cleanup + multicast_group_detach(client, project_name, instance_name, group_name) + .await; + wait_for_group_deleted(client, group_name).await; +} + +/// Test member state transition: "Joining" → "Left" when instance becomes invalid. 
+/// +/// When a member is in "Joining" state (waiting for DPD programming) and the +/// instance becomes invalid (stopped/failed), the RPW should transition the +/// member to "Left" state. +#[nexus_test] +async fn test_member_joining_to_left_on_instance_stop( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "test-project"; + let group_name = "joining-to-left-group"; + let instance_name = "joining-to-left-instance"; + + // Setup: Create project and pools + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, project_name), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; + + // Create and start instance + let instance = create_instance(client, project_name, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + + // Stop DPD to prevent member from transitioning to "Joined" + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + + // Add instance to group - member will be stuck in "Joining" since DPD is down + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Run reconciler - member should stay in "Joining" since DPD is unavailable + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Verify member is in "Joining" state (can't reach Joined without DPD) + let members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1); + assert_eq!( + members[0].state, "Joining", + "Running instance member should be Joining when DPD unavailable, got: {}", + members[0].state + ); + + // Stop the instance while member is in "Joining" state + let nexus = 
&cptestctx.server.server_context().nexus; + let stop_url = + format!("/v1/instances/{instance_name}/stop?project={project_name}"); + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &stop_url) + .body(None as Option<&serde_json::Value>) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + instance_simulate(nexus, &instance_id).await; + instance_wait_for_state(&client, instance_id, InstanceState::Stopped).await; + + // Run reconciler - should detect invalid instance and transition to "Left" + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Verify member transitioned to "Left" state + wait_for_member_state( + cptestctx, + group_name, + instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Left, + ) + .await; + + let members_after = list_multicast_group_members(client, group_name).await; + assert_eq!(members_after.len(), 1); + assert_eq!( + members_after[0].state, "Left", + "Member should transition to Left when instance stops while in Joining state" + ); +} + +/// Test that "Left" members stay in "Left" while group is still "Creating". +/// +/// When a member is in "Left" state and the instance starts running, the member +/// should stay in "Left" until the group becomes "Active". This prevents +/// premature member activation when the group hasn't been programmed in DPD. 
+#[nexus_test] +async fn test_left_member_waits_for_group_active( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "test-project"; + let group_name = "left-waits-group"; + let instance_name = "left-waits-instance"; + + // Setup: Create project and pools + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, project_name), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; + + // Create a stopped instance first (not running) + let instance = create_instance_with( + client, + project_name, + instance_name, + &InstanceNetworkInterfaceAttachment::Default, + vec![], + vec![], + false, // don't start + None, // auto_restart_policy + None, // cpu_platform + vec![], // multicast_groups + ) + .await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + let nexus = &cptestctx.server.server_context().nexus; + + // Stop DPD to keep group in "Creating" state + cptestctx.stop_dendrite(SwitchLocation::Switch0).await; + + // Add stopped instance to group - member will be in "Left" state + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Verify group is stuck in "Creating" (DPD is down) + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + let group: MulticastGroup = + object_get(client, &format!("/v1/multicast-groups/{group_name}")).await; + assert_eq!( + group.state, "Creating", + "Group should be stuck in Creating without DPD" + ); + + // Verify member is in "Left" state (stopped instance) + let members = list_multicast_group_members(client, group_name).await; + assert_eq!(members.len(), 1); + assert_eq!( + members[0].state, "Left", + 
"Stopped instance member should be in Left state" + ); + + // Start the instance while group is still Creating + let start_url = + format!("/v1/instances/{instance_name}/start?project={project_name}"); + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &start_url) + .body(None as Option<&serde_json::Value>) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + instance_simulate(nexus, &instance_id).await; + instance_wait_for_state(&client, instance_id, InstanceState::Running).await; + + // Run reconciler - member should stay in Left because group is not Active + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + + // Verify member stays in "Left" (waiting for group to become Active) + let members_after = list_multicast_group_members(client, group_name).await; + assert_eq!(members_after.len(), 1); + assert_eq!( + members_after[0].state, "Left", + "Member should stay in Left while group is Creating, got: {}", + members_after[0].state + ); + + // Verify group is still Creating + let group_after: MulticastGroup = + object_get(client, &format!("/v1/multicast-groups/{group_name}")).await; + assert_eq!( + group_after.state, "Creating", + "Group should still be Creating without DPD" + ); +} diff --git a/nexus/tests/integration_tests/multicast/groups.rs b/nexus/tests/integration_tests/multicast/groups.rs index 8d795c6d26a..172323b8074 100644 --- a/nexus/tests/integration_tests/multicast/groups.rs +++ b/nexus/tests/integration_tests/multicast/groups.rs @@ -4,2907 +4,1574 @@ // // Copyright 2025 Oxide Computer Company -//! Integration tests for multicast group APIs and basic membership operations. +//! Integration tests for multicast group APIs and IP pool operations. +//! +//! Core multicast functionality tests: +//! +//! - IP pool range validation and allocation +//! - Member operations: add, remove, list, lookup by IP +//! 
- Instance deletion cleanup (removes multicast memberships) +//! - Source IP validation for SSM groups +//! - Automatic pool selection and default pool behavior +//! - Pool exhaustion handling +//! - Pool deletion protection (cannot delete pool with active groups) +//! - DPD(-client) integration: verifies groups are programmed on switches use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use dpd_client::types as dpd_types; use dropshot::HttpErrorResponseBody; use dropshot::ResultsPage; use http::{Method, StatusCode}; - -use crate::integration_tests::instances::{ - instance_simulate, instance_wait_for_state, -}; -use dpd_client::Error as DpdError; -use dpd_client::types as dpd_types; -use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::dpd_client; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ - create_default_ip_pool, create_instance, create_project, link_ip_pool, - object_create, object_create_error, object_delete, object_get, - object_get_error, object_put, object_put_error, + create_default_ip_pool, create_instance, create_project, object_create, + object_create_error, object_delete, object_delete_error, object_get, + object_get_error, }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ - IpPoolCreate, MulticastGroupCreate, MulticastGroupMemberAdd, - MulticastGroupUpdate, + InstanceMulticastGroupJoin, IpPoolCreate, MulticastGroupMemberAdd, }; use nexus_types::external_api::shared::{IpRange, Ipv4Range, Ipv6Range}; use nexus_types::external_api::views::{ IpPool, IpPoolRange, IpVersion, MulticastGroup, MulticastGroupMember, }; -use nexus_types::identity::Resource; use omicron_common::api::external::{ - IdentityMetadataCreateParams, IdentityMetadataUpdateParams, InstanceState, - NameOrId, Nullable, + IdentityMetadataCreateParams, InstanceState, NameOrId, }; -use omicron_common::vlan::VlanID; use omicron_uuid_kinds::InstanceUuid; use 
super::*; +use crate::integration_tests::instances::{ + instance_simulate, instance_wait_for_state, +}; -/// Verify creation works when optional fields are omitted from the JSON body -/// (i.e., keys are missing, not present as `null`). This mirrors CLI behavior. +/// Test that multicast IP pools reject invalid ranges at the pool level #[nexus_test] -async fn test_multicast_group_create_raw_omitted_optionals( +async fn test_multicast_ip_pool_range_validation( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let project_name = "raw-omit-proj"; - let pool_name = "raw-omit-pool"; - let group_name = "raw-omit-group"; - // Ensure a project exists (not strictly required for fleet-scoped groups) - create_project(client, project_name).await; + // Create IPv4 multicast pool + let pool_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "test-v4-pool".parse().unwrap(), + description: "IPv4 multicast pool for validation tests".to_string(), + }, + IpVersion::V4, + ); + object_create::<_, IpPool>(client, "/v1/system/ip-pools", &pool_params) + .await; + + let range_url = "/v1/system/ip-pools/test-v4-pool/ranges/add"; - // Create a multicast pool with a unique, non-reserved ASM range and link it - create_multicast_ip_pool_with_range( + // IPv4 non-multicast range should be rejected + let ipv4_unicast_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(10, 0, 0, 1), + Ipv4Addr::new(10, 0, 0, 255), + ) + .unwrap(), + ); + object_create_error( client, - pool_name, - (224, 9, 0, 10), - (224, 9, 0, 255), + range_url, + &ipv4_unicast_range, + StatusCode::BAD_REQUEST, ) .await; - let group_url = mcast_groups_url(); - - // Omit multicast_ip and source_ips keys entirely; specify pool by name - let body = format!( - r#"{{"name":"{group}","description":"Create with omitted optionals","pool":"{pool}"}}"#, - group = group_name, - pool = pool_name, + // IPv4 link-local multicast range should be rejected + let 
ipv4_link_local_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 0, 0, 1), + Ipv4Addr::new(224, 0, 0, 255), + ) + .unwrap(), ); - - let created: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &group_url) - .header("content-type", "application/json") - .raw_body(Some(body)) - .expect_status(Some(StatusCode::CREATED)), + object_create_error( + client, + range_url, + &ipv4_link_local_range, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Create with omitted optional fields should succeed") - .parsed_body() - .expect("Should parse created MulticastGroup"); - - assert_eq!(created.identity.name, group_name); - assert!(created.multicast_ip.is_multicast()); - assert!(created.source_ips.is_empty()); + .await; - // Wait for reconciler to activate the group - wait_for_group_active(client, group_name).await; + // Valid IPv4 multicast range should be accepted (using ASM range) + let valid_ipv4_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 1, 0, 1), + Ipv4Addr::new(224, 1, 0, 255), + ) + .unwrap(), + ); + object_create::<_, IpPoolRange>(client, range_url, &valid_ipv4_range).await; - // Cleanup - object_delete(client, &mcast_group_url(group_name)).await; + // TODO: Remove this test once IPv6 is enabled for multicast pools. 
+ // IPv6 ranges should currently be rejected (not yet supported) + let ipv6_range = IpRange::V6( + Ipv6Range::new( + Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 1), + Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 255), + ) + .unwrap(), + ); + let error = object_create_error( + client, + range_url, + &ipv6_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(error.message, "IPv6 ranges are not allowed yet"); } -/// Verify ASM creation with explicit address works when `source_ips` is omitted #[nexus_test] -async fn test_multicast_group_create_raw_asm_omitted_sources( +async fn test_multicast_group_member_operations( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let pool_name = "raw-asm-pool"; - let group_name = "raw-asm-group"; + let project_name = "test-project"; + let group_name = "test-group"; + let instance_name = "test-instance"; - // Pool for allocation (even with explicit IP, current create path validates pool) - create_multicast_ip_pool_with_range( - client, - pool_name, - (224, 10, 0, 10), - (224, 10, 0, 255), + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), // For instance networking + create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 4, 0, 10), + (224, 4, 0, 255), + ), ) .await; - let group_url = mcast_groups_url(); - let body = format!( - r#"{{"name":"{group}","description":"ASM no sources omitted","multicast_ip":"224.10.0.100","pool":"{pool}"}}"#, - group = group_name, - pool = pool_name, + let instance = create_instance(client, project_name, instance_name).await; + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + let added_member: MulticastGroupMember = + object_create(client, &member_add_url, 
&member_params).await; + + assert_eq!( + added_member.instance_id.to_string(), + instance.identity.id.to_string() ); - let created: MulticastGroup = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &group_url) - .header("content-type", "application/json") - .raw_body(Some(body)) - .expect_status(Some(StatusCode::CREATED)), + // Wait for member to become joined + // Member starts in "Joining" state and transitions to "Joined" via reconciler + // Member only transitions to "Joined" after successful DPD update + wait_for_member_state( + cptestctx, + group_name, + instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Joined, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("ASM creation with omitted source_ips should succeed") - .parsed_body() - .expect("Should parse created MulticastGroup"); + .await; - assert!(created.multicast_ip.is_multicast()); - assert!(created.source_ips.is_empty()); - wait_for_group_active(client, group_name).await; + // Test listing members (should have 1 now in Joined state) + let members = list_multicast_group_members(&client, group_name).await; + assert_eq!(members.len(), 1, "Expected exactly 1 member"); + assert_eq!(members[0].instance_id, added_member.instance_id); + assert_eq!(members[0].multicast_group_id, added_member.multicast_group_id); - object_delete(client, &mcast_group_url(group_name)).await; -} + // Test listing groups (should include our implicitly created group) + let groups = list_multicast_groups(&client).await; + assert!( + groups.iter().any(|g| g.identity.name == group_name), + "Expected group {group_name} to appear in group listing" + ); -/// Verify SSM creation fails when `source_ips` is omitted (missing sources) -#[nexus_test] -async fn test_multicast_group_create_raw_ssm_missing_sources( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let pool_name = "raw-ssm-pool"; - let group_name = "raw-ssm-group"; + // DPD Validation: Verify 
groups exist in dataplane after member addition + let dpd_client = dpd_client(cptestctx); + // Get the multicast IP from the group (since member doesn't have the IP field) + let group_get_url = mcast_group_url(group_name); + let group: MulticastGroup = object_get(client, &group_get_url).await; + let external_multicast_ip = group.multicast_ip; - // Pool for validation - create_multicast_ip_pool_with_range( - client, - pool_name, - (224, 11, 0, 10), - (224, 11, 0, 255), - ) - .await; + // List all groups in DPD to find both external and underlay groups + let dpd_groups = dpd_client + .multicast_groups_list(None, None) + .await + .expect("Should list DPD groups"); + + // Find the external IPv4 group (should exist but may not have members) + let expect_msg = + format!("External group {external_multicast_ip} should exist in DPD"); + dpd_groups + .items + .iter() + .find(|g| { + let ip = match g { + dpd_types::MulticastGroupResponse::External { + group_ip, + .. + } => *group_ip, + dpd_types::MulticastGroupResponse::Underlay { + group_ip, + .. + } => IpAddr::V6(group_ip.0), + }; + ip == external_multicast_ip + && matches!( + g, + dpd_types::MulticastGroupResponse::External { .. } + ) + }) + .expect(&expect_msg); + + // Directly get the underlay IPv6 group by finding the admin-scoped address + // First find the underlay group IP from the list to get the exact IPv6 address + let underlay_ip = dpd_groups + .items + .iter() + .find_map(|g| { + match g { + dpd_types::MulticastGroupResponse::Underlay { + group_ip, + .. + } => { + // Check if it starts with ff04 (admin-scoped multicast) + if group_ip.0.segments()[0] == 0xff04 { + Some(group_ip.clone()) + } else { + None + } + } + dpd_types::MulticastGroupResponse::External { .. 
} => None, + } + }) + .expect("Should find underlay group IP in DPD response"); + + // Get the underlay group directly + let underlay_group = dpd_client + .multicast_group_get_underlay(&underlay_ip) + .await + .expect("Should get underlay group from DPD"); - let group_url = mcast_groups_url(); - let body = format!( - r#"{{"name":"{group}","description":"SSM missing sources","multicast_ip":"232.1.2.3","pool":"{pool}"}}"#, - group = group_name, - pool = pool_name, + assert_eq!( + underlay_group.members.len(), + 1, + "Underlay group should have exactly 1 member after member addition" ); - let error: HttpErrorResponseBody = NexusRequest::new( - RequestBuilder::new(client, Method::POST, &group_url) - .header("content-type", "application/json") - .raw_body(Some(body)) - .expect_status(Some(StatusCode::BAD_REQUEST)), + // Assert all underlay members use rear (backplane) ports with Underlay direction + for member in &underlay_group.members { + assert!( + matches!(member.port_id, dpd_client::types::PortId::Rear(_)), + "Underlay member should use rear (backplane) port, got: {:?}", + member.port_id + ); + assert_eq!( + member.direction, + dpd_client::types::Direction::Underlay, + "Underlay member should have Underlay direction" + ); + } + + // Test removing instance from multicast group using path-based DELETE + let member_remove_url = format!( + "{}/{instance_name}?project={project_name}", + mcast_group_members_url(group_name) + ); + + NexusRequest::new( + RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) + .expect_status(Some(StatusCode::NO_CONTENT)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() .await - .expect("SSM creation without sources should fail") - .parsed_body() - .expect("Should parse error response body"); + .expect("Should remove member from multicast group"); - assert!( - error - .message - .contains("SSM multicast addresses require at least one source IP"), - "unexpected error message: {}", - error.message - ); + // Implicit deletion 
model: group is implicitly deleted when last member is removed + // Wait for both Nexus group and DPD group to be deleted + wait_for_group_deleted(client, group_name).await; + wait_for_group_deleted_from_dpd(cptestctx, external_multicast_ip).await; } #[nexus_test] -async fn test_multicast_group_basic_crud(cptestctx: &ControlPlaneTestContext) { +async fn test_instance_multicast_endpoints( + cptestctx: &ControlPlaneTestContext, +) { let client = &cptestctx.external_client; let project_name = "test-project"; - let group_name = "test-group"; - let description = "A test multicast group"; - - // Create a project - create_project(&client, project_name).await; + let group1_name = "mcast-group-1"; + let group2_name = "mcast-group-2"; + let instance_name = "test-instance"; - // Test with explicit multicast pool using unique range for this test - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 1, 0, 10), - (224, 1, 0, 255), + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 5, 0, 10), + (224, 5, 0, 255), + ), ) .await; - let group_url = mcast_groups_url(); + // Implicit deletion model: Groups will implicitly create when first instance joins - // Verify empty list initially - let groups = list_multicast_groups(&client).await; - assert_eq!(groups.len(), 0, "Expected empty list of multicast groups"); - - // Test creating a multicast group with auto-allocated IP - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: String::from(description), - }, - multicast_ip: None, // Auto-allocate - source_ips: None, // Any-Source Multicast - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, 
&group_url, ¶ms).await; - - wait_for_group_active(client, group_name).await; - - assert_eq!(created_group.identity.name, group_name); - assert_eq!(created_group.identity.description, description); - assert!(created_group.multicast_ip.is_multicast()); - assert_eq!(created_group.source_ips.len(), 0); - - // Verify we can list and find it - let groups = list_multicast_groups(&client).await; - assert_eq!(groups.len(), 1, "Expected exactly 1 multicast group"); - assert_groups_eq(&created_group, &groups[0]); - - // Verify we can fetch it directly - let fetched_group_url = mcast_group_url(group_name); - let fetched_group: MulticastGroup = - object_get(client, &fetched_group_url).await; - assert_groups_eq(&created_group, &fetched_group); - - // Test conflict error for duplicate name - object_create_error(client, &group_url, ¶ms, StatusCode::BAD_REQUEST) - .await; - - // Test updating the group - let new_description = "Updated description"; - let update_params = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some(String::from(new_description)), - }, - source_ips: None, - mvlan: None, - }; - - let updated_group: MulticastGroup = - object_put(client, &fetched_group_url, &update_params).await; - assert_eq!(updated_group.identity.description, new_description); - assert_eq!(updated_group.identity.id, created_group.identity.id); - assert!( - updated_group.identity.time_modified - > created_group.identity.time_modified - ); - - // Test deleting the group - object_delete(client, &fetched_group_url).await; - - // Wait for group to be deleted (should return 404) - wait_for_group_deleted(client, group_name).await; - - let groups = list_multicast_groups(&client).await; - assert_eq!(groups.len(), 0, "Expected empty list after deletion"); -} - -#[nexus_test] -async fn test_multicast_group_with_default_pool( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let 
group_name = "test-default-pool-group"; - - // Create a project for testing - create_project(&client, project_name).await; - - // Create multicast IP pool - let pool_params = IpPoolCreate::new_multicast( - omicron_common::api::external::IdentityMetadataCreateParams { - name: "default".parse().unwrap(), - description: "Default multicast IP pool for testing".to_string(), - }, - IpVersion::V4, - ); - - object_create::<_, IpPool>(&client, "/v1/system/ip-pools", &pool_params) - .await; - - // Add IPv4 multicast range - use unique range for this test - let ipv4_range = IpRange::V4( - Ipv4Range::new( - Ipv4Addr::new(224, 8, 0, 10), - Ipv4Addr::new(224, 8, 0, 255), - ) - .unwrap(), - ); - let range_url = "/v1/system/ip-pools/default/ranges/add"; - object_create::<_, IpPoolRange>(&client, range_url, &ipv4_range).await; - - // Link the pool to the silo as the default multicast pool - link_ip_pool(&client, "default", &DEFAULT_SILO.id(), true).await; - - let group_url = "/v1/multicast-groups".to_string(); - - // Test creating with default pool (pool: None) - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group using default pool".to_string(), - }, - multicast_ip: None, // Auto-allocate - source_ips: None, // Any-Source Multicast - pool: None, // Use default multicast pool - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - assert_eq!(created_group.identity.name, group_name); - assert!(created_group.multicast_ip.is_multicast()); - - wait_for_group_active(client, group_name).await; - - // Clean up - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; - - // Wait for the multicast group reconciler to process the deletion - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // After reconciler processing, the group should be gone (404) - 
object_get_error(client, &group_delete_url, StatusCode::NOT_FOUND).await; -} - -#[nexus_test] -async fn test_multicast_group_with_specific_ip( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "test-group-specific-ip"; - - // Create a project and multicast IP pool - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 2, 0, 10), - (224, 2, 0, 255), - ) - .await; - let group_url = "/v1/multicast-groups".to_string(); - - // Auto-allocation (should work) - let auto_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group with auto-allocated IP".to_string(), - }, - multicast_ip: None, // Auto-allocate - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let auto_group: MulticastGroup = - object_create(client, &group_url, &auto_params).await; - - wait_for_group_active(client, group_name).await; - - assert!(auto_group.multicast_ip.is_multicast()); - assert_eq!(auto_group.identity.name, group_name); - assert_eq!(auto_group.identity.description, "Group with auto-allocated IP"); - - // Clean up auto-allocated group - let auto_delete_url = mcast_group_url(group_name); - object_delete(client, &auto_delete_url).await; - - // Wait for the multicast group reconciler to process the deletion - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // After reconciler processing, the group should be gone (404) - object_get_error(client, &auto_delete_url, StatusCode::NOT_FOUND).await; - - // Explicit IP allocation - let explicit_group_name = "test-group-explicit"; - let ipv4_addr = IpAddr::V4(Ipv4Addr::new(224, 2, 0, 20)); - let explicit_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: 
explicit_group_name.parse().unwrap(), - description: "Group with explicit IPv4".to_string(), - }, - multicast_ip: Some(ipv4_addr), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let explicit_group: MulticastGroup = - object_create(client, &group_url, &explicit_params).await; - assert_eq!(explicit_group.multicast_ip, ipv4_addr); - assert_eq!(explicit_group.identity.name, explicit_group_name); - assert_eq!(explicit_group.identity.description, "Group with explicit IPv4"); - - // Wait for explicit group to become active before deletion - wait_for_group_active(client, explicit_group_name).await; - - // Clean up explicit group - let explicit_delete_url = mcast_group_url(explicit_group_name); - object_delete(client, &explicit_delete_url).await; - - // Wait for the multicast group reconciler to process the deletion - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - object_get_error(client, &explicit_delete_url, StatusCode::NOT_FOUND).await; -} - -#[nexus_test] -async fn test_multicast_group_with_source_ips( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "test-ssm-group"; - - // Create a project and SSM multicast IP pool (232.0.0.0/8 range) - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; // Required for any instance operations - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (232, 11, 0, 10), // SSM range: 232.11.0.10 - 232.11.0.255 - (232, 11, 0, 255), - ) - .await; - let group_url = "/v1/multicast-groups".to_string(); - - // Test creating with Source-Specific Multicast (SSM) source IPs - // SSM range is 232.0.0.0/8, so we use our unique SSM range - let ssm_ip = IpAddr::V4(Ipv4Addr::new(232, 11, 0, 50)); // From our SSM range - let source_ips = vec![ - IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), // Public DNS server - 
 IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), // Cloudflare DNS - ]; - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "SSM group with source IPs".to_string(), - }, - multicast_ip: Some(ssm_ip), - source_ips: Some(source_ips.clone()), - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, &params).await; - - // Wait for group to become active - let active_group = wait_for_group_active(client, group_name).await; - - // Verify SSM group properties - assert_eq!(created_group.source_ips, source_ips); - assert_eq!(created_group.multicast_ip, ssm_ip); - assert_eq!(active_group.state, "Active"); - - // DPD Validation: Check that SSM group exists in dataplane - let dpd_client = dpd_client(cptestctx); - let dpd_group = dpd_client - .multicast_group_get(&ssm_ip) - .await - .expect("SSM group should exist in dataplane after creation"); - validate_dpd_group_response( - &dpd_group, - &ssm_ip, - Some(0), // No members initially - "SSM group creation", - ); - - // Clean up - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; - - // Wait for the multicast group reconciler to process the deletion - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // Verify deletion - object_get_error(client, &group_delete_url, StatusCode::NOT_FOUND).await; -} - -#[nexus_test] -async fn test_multicast_group_validation_errors( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - - // Create a project and multicast IP pool - create_project(&client, project_name).await; - create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 3, 0, 10), - (224, 3, 0, 255), - ) - .await; - - let group_url = "/v1/multicast-groups".to_string(); - - // Test with non-multicast 
IP address - let unicast_ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "invalid-group".parse().unwrap(), - description: "Group with invalid IP".to_string(), - }, - multicast_ip: Some(unicast_ip), - source_ips: None, - pool: None, // Use default pool for validation test - mvlan: None, - }; - - object_create_error(client, &group_url, ¶ms, StatusCode::BAD_REQUEST) - .await; - - // Test with link-local multicast (should be rejected) - let link_local_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 0, 1)); - let params_link_local = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "link-local-group".parse().unwrap(), - description: "Group with link-local IP".to_string(), - }, - multicast_ip: Some(link_local_ip), - source_ips: None, - pool: None, // Use default pool for validation test - mvlan: None, - }; - - object_create_error( - client, - &group_url, - ¶ms_link_local, - StatusCode::BAD_REQUEST, - ) - .await; - - // Test with IPv6 unicast (should be rejected) - let ipv6_unicast = IpAddr::V6(Ipv6Addr::new( - 0x2001, 0xdb8, 0x1234, 0x5678, 0x9abc, 0xdef0, 0x1234, 0x5678, - )); - let params_ipv6_unicast = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "ipv6-unicast-group".parse().unwrap(), - description: "Group with IPv6 unicast IP".to_string(), - }, - multicast_ip: Some(ipv6_unicast), - source_ips: None, - pool: None, - mvlan: None, - }; - - object_create_error( - client, - &group_url, - ¶ms_ipv6_unicast, - StatusCode::BAD_REQUEST, - ) - .await; - - // Test with IPv6 interface-local multicast ff01:: (should be rejected) - let ipv6_interface_local = - IpAddr::V6(Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 1)); - let params_ipv6_interface_local = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "ipv6-interface-local-group".parse().unwrap(), - description: "Group with IPv6 interface-local multicast IP" - .to_string(), - }, - 
multicast_ip: Some(ipv6_interface_local), - source_ips: None, - pool: None, - mvlan: None, - }; - - object_create_error( - client, - &group_url, - ¶ms_ipv6_interface_local, - StatusCode::BAD_REQUEST, - ) - .await; - - // Test with IPv6 link-local multicast ff02:: (should be rejected) - let ipv6_link_local_mcast = - IpAddr::V6(Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 1)); - let params_ipv6_link_local = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "ipv6-link-local-group".parse().unwrap(), - description: "Group with IPv6 link-local multicast IP".to_string(), - }, - multicast_ip: Some(ipv6_link_local_mcast), - source_ips: None, - pool: None, - mvlan: None, - }; - - object_create_error( - client, - &group_url, - ¶ms_ipv6_link_local, - StatusCode::BAD_REQUEST, - ) - .await; -} - -/// Test that multicast IP pools reject invalid ranges at the pool level -#[nexus_test] -async fn test_multicast_ip_pool_range_validation( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Create IPv4 multicast pool - let pool_params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "test-v4-pool".parse().unwrap(), - description: "IPv4 multicast pool for validation tests".to_string(), - }, - IpVersion::V4, - ); - object_create::<_, IpPool>(client, "/v1/system/ip-pools", &pool_params) - .await; - - let range_url = "/v1/system/ip-pools/test-v4-pool/ranges/add"; - - // IPv4 non-multicast range should be rejected - let ipv4_unicast_range = IpRange::V4( - Ipv4Range::new( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 255), - ) - .unwrap(), - ); - object_create_error( - client, - range_url, - &ipv4_unicast_range, - StatusCode::BAD_REQUEST, - ) - .await; - - // IPv4 link-local multicast range should be rejected - let ipv4_link_local_range = IpRange::V4( - Ipv4Range::new( - Ipv4Addr::new(224, 0, 0, 1), - Ipv4Addr::new(224, 0, 0, 255), - ) - .unwrap(), - ); - object_create_error( - client, - range_url, 
- &ipv4_link_local_range, - StatusCode::BAD_REQUEST, - ) - .await; - - // Valid IPv4 multicast range should be accepted - let valid_ipv4_range = IpRange::V4( - Ipv4Range::new( - Ipv4Addr::new(239, 0, 0, 1), - Ipv4Addr::new(239, 0, 0, 255), - ) - .unwrap(), - ); - object_create::<_, IpPoolRange>(client, range_url, &valid_ipv4_range).await; - - // TODO: Remove this test once IPv6 is enabled for multicast pools. - // IPv6 ranges should currently be rejected (not yet supported) - let ipv6_range = IpRange::V6( - Ipv6Range::new( - Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 1), - Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 255), - ) - .unwrap(), - ); - let error = object_create_error( - client, - range_url, - &ipv6_range, - StatusCode::BAD_REQUEST, - ) - .await; - assert_eq!(error.message, "IPv6 ranges are not allowed yet"); -} - -#[nexus_test] -async fn test_multicast_group_member_operations( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "test-group"; - let instance_name = "test-instance"; - - // Create project and IP pools in parallel - let (_, _, mcast_pool) = ops::join3( - create_project(&client, project_name), - create_default_ip_pool(&client), // For instance networking - create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 4, 0, 10), - (224, 4, 0, 255), - ), - ) - .await; - - // Create multicast group and instance in parallel - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Test group for member operations".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let (_, instance) = ops::join2( - async { - object_create::<_, MulticastGroup>(client, &group_url, ¶ms) - .await; - wait_for_group_active(client, 
group_name).await; - }, - create_instance(client, project_name, instance_name), - ) - .await; - - // Test listing members (should be empty initially) - let members = list_multicast_group_members(&client, group_name).await; - assert_eq!(members.len(), 0, "Expected empty member list initially"); - - // Test adding instance to multicast group - let member_add_url = format!( - "{}?project={project_name}", - mcast_group_members_url(group_name) - ); - let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Name(instance_name.parse().unwrap()), - }; - let added_member: MulticastGroupMember = - object_create(client, &member_add_url, &member_params).await; - - assert_eq!( - added_member.instance_id.to_string(), - instance.identity.id.to_string() - ); - - // Wait for member to become joined - // Member starts in "Joining" state and transitions to "Joined" via reconciler - // Member only transitions to "Joined" AFTER successful DPD update - wait_for_member_state( - cptestctx, - group_name, - instance.identity.id, - nexus_db_model::MulticastGroupMemberState::Joined, - ) - .await; - - // Test listing members (should have 1 now in Joined state) - let members = list_multicast_group_members(&client, group_name).await; - assert_eq!(members.len(), 1, "Expected exactly 1 member"); - assert_eq!(members[0].instance_id, added_member.instance_id); - assert_eq!(members[0].multicast_group_id, added_member.multicast_group_id); - - // DPD Validation: Verify groups exist in dataplane after member addition - let dpd_client = dpd_client(cptestctx); - // Get the multicast IP from the group (since member doesn't have the IP field) - let group_get_url = mcast_group_url(group_name); - let group: MulticastGroup = object_get(client, &group_get_url).await; - let external_multicast_ip = group.multicast_ip; - - // List all groups in DPD to find both external and underlay groups - let dpd_groups = dpd_client - .multicast_groups_list(None, None) - .await - .expect("Should list DPD groups"); - 
- // Find the external IPv4 group (should exist but may not have members) - let expect_msg = - format!("External group {external_multicast_ip} should exist in DPD"); - dpd_groups - .items - .iter() - .find(|g| { - let ip = match g { - dpd_types::MulticastGroupResponse::External { - group_ip, - .. - } => *group_ip, - dpd_types::MulticastGroupResponse::Underlay { - group_ip, - .. - } => IpAddr::V6(group_ip.0), - }; - ip == external_multicast_ip - && matches!( - g, - dpd_types::MulticastGroupResponse::External { .. } - ) - }) - .expect(&expect_msg); - - // Directly get the underlay IPv6 group by finding the admin-scoped address - // First find the underlay group IP from the list to get the exact IPv6 address - let underlay_ip = dpd_groups - .items - .iter() - .find_map(|g| { - match g { - dpd_types::MulticastGroupResponse::Underlay { - group_ip, - .. - } => { - // Check if it starts with ff04 (admin-scoped multicast) - if group_ip.0.segments()[0] == 0xff04 { - Some(group_ip.clone()) - } else { - None - } - } - dpd_types::MulticastGroupResponse::External { .. 
} => None, - } - }) - .expect("Should find underlay group IP in DPD response"); - - // Get the underlay group directly - let underlay_group = dpd_client - .multicast_group_get_underlay(&underlay_ip) - .await - .expect("Should get underlay group from DPD"); - - assert_eq!( - underlay_group.members.len(), - 1, - "Underlay group should have exactly 1 member after member addition" - ); - - // Assert all underlay members use rear (backplane) ports with Underlay direction - for member in &underlay_group.members { - assert!( - matches!(member.port_id, dpd_client::types::PortId::Rear(_)), - "Underlay member should use rear (backplane) port, got: {:?}", - member.port_id - ); - assert_eq!( - member.direction, - dpd_client::types::Direction::Underlay, - "Underlay member should have Underlay direction" - ); - } - - // Test removing instance from multicast group using path-based DELETE - let member_remove_url = format!( - "{}/{instance_name}?project={project_name}", - mcast_group_members_url(group_name) - ); - - NexusRequest::new( - RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) - .expect_status(Some(StatusCode::NO_CONTENT)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Should remove member from multicast group"); - - // Wait for member count to reach 0 after removal - wait_for_member_count(&client, group_name, 0).await; - - // DPD Validation: Verify group has no members in dataplane after removal - let dpd_group = dpd_client.multicast_group_get(&external_multicast_ip).await - .expect("Multicast group should still exist in dataplane after member removal"); - validate_dpd_group_response( - &dpd_group, - &external_multicast_ip, - Some(0), // Should have 0 members after removal - "external group after member removal", - ); - - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; -} - -#[nexus_test] -async fn test_instance_multicast_endpoints( - cptestctx: 
&ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group1_name = "mcast-group-1"; - let group2_name = "mcast-group-2"; - let instance_name = "test-instance"; - - // Create a project, default unicast pool, and multicast IP pool - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; // For instance networking - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 5, 0, 10), - (224, 5, 0, 255), - ) - .await; - - // Create two multicast groups in parallel - let group_url = "/v1/multicast-groups".to_string(); - - let group1_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group1_name.parse().unwrap(), - description: "First test group".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let group2_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group2_name.parse().unwrap(), - description: "Second test group".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - // Create both groups in parallel then wait for both to be active - ops::join2( - object_create::<_, MulticastGroup>(client, &group_url, &group1_params), - object_create::<_, MulticastGroup>(client, &group_url, &group2_params), - ) - .await; - - ops::join2( - wait_for_group_active(client, group1_name), - wait_for_group_active(client, group2_name), - ) - .await; - - // Create an instance (starts automatically with create_instance helper) - let instance = create_instance(client, project_name, instance_name).await; - let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); - - // Simulate and wait for instance to be fully running with sled_id assigned - let nexus = &cptestctx.server.server_context().nexus; - 
instance_simulate(nexus, &instance_id).await; - instance_wait_for_state(client, instance_id, InstanceState::Running).await; - wait_for_instance_sled_assignment(cptestctx, &instance_id).await; - - // Test: List instance multicast groups (should be empty initially) - let instance_groups_url = format!( - "/v1/instances/{instance_name}/multicast-groups?project={project_name}" - ); - let instance_memberships: ResultsPage = - object_get(client, &instance_groups_url).await; - assert_eq!( - instance_memberships.items.len(), - 0, - "Instance should have no multicast memberships initially" - ); - - // Test: Join group1 using instance-centric endpoint - let instance_join_group1_url = format!( - "/v1/instances/{instance_name}/multicast-groups/{group1_name}?project={project_name}" - ); - // Use PUT method but expect 201 Created (not 200 OK like object_put) - // This is correct HTTP semantics - PUT can return 201 when creating new resource - let member1: MulticastGroupMember = NexusRequest::new( - RequestBuilder::new( - client, - http::Method::PUT, - &instance_join_group1_url, - ) - .body(Some(&())) - .expect_status(Some(StatusCode::CREATED)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!(member1.instance_id, instance.identity.id); - - // Wait for member to become joined - wait_for_member_state( - cptestctx, - group1_name, - instance.identity.id, - nexus_db_model::MulticastGroupMemberState::Joined, - ) - .await; - - // Test: Verify membership shows up in both endpoints - // Check group-centric view - let group1_members = - list_multicast_group_members(&client, group1_name).await; - assert_eq!(group1_members.len(), 1); - assert_eq!(group1_members[0].instance_id, instance.identity.id); - - // Check instance-centric view (test the list endpoint thoroughly) - let instance_memberships: ResultsPage = - object_get(client, &instance_groups_url).await; - assert_eq!( - instance_memberships.items.len(), - 1, - 
"Instance should have exactly 1 membership" - ); - assert_eq!(instance_memberships.items[0].instance_id, instance.identity.id); - assert_eq!( - instance_memberships.items[0].multicast_group_id, - member1.multicast_group_id - ); - assert_eq!(instance_memberships.items[0].state, "Joined"); - - // Join group2 using group-centric endpoint (test both directions) - let member_add_url = format!( - "{}?project={project_name}", - mcast_group_members_url(group2_name) - ); - let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Name(instance_name.parse().unwrap()), - }; - let member2: MulticastGroupMember = - object_create(client, &member_add_url, &member_params).await; - assert_eq!(member2.instance_id, instance.identity.id); - - // Wait for member to become joined - wait_for_member_state( - cptestctx, - group2_name, - instance.identity.id, - nexus_db_model::MulticastGroupMemberState::Joined, - ) - .await; - - // Verify instance now belongs to both groups (comprehensive list test) - let instance_memberships: ResultsPage = - object_get(client, &instance_groups_url).await; - assert_eq!( - instance_memberships.items.len(), - 2, - "Instance should belong to both groups" - ); - - // Verify the list endpoint returns the correct membership details - let membership_group_ids: Vec<_> = instance_memberships - .items - .iter() - .map(|m| m.multicast_group_id) - .collect(); - assert!( - membership_group_ids.contains(&member1.multicast_group_id), - "List should include group1 membership" - ); - assert!( - membership_group_ids.contains(&member2.multicast_group_id), - "List should include group2 membership" - ); - - // Verify all memberships show correct instance_id and state - for membership in &instance_memberships.items { - assert_eq!(membership.instance_id, instance.identity.id); - assert_eq!(membership.state, "Joined"); - } - - // Verify each group shows the instance as a member - let group1_members = - list_multicast_group_members(&client, group1_name).await; - let 
group2_members = - list_multicast_group_members(&client, group2_name).await; - assert_eq!(group1_members.len(), 1); - assert_eq!(group2_members.len(), 1); - assert_eq!(group1_members[0].instance_id, instance.identity.id); - assert_eq!(group2_members[0].instance_id, instance.identity.id); - - // Leave group1 using instance-centric endpoint - let instance_leave_group1_url = format!( - "/v1/instances/{instance_name}/multicast-groups/{group1_name}?project={project_name}" - ); - object_delete(client, &instance_leave_group1_url).await; - - // Wait for reconciler to process the removal and completely delete the member - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // Verify membership removed from both views - // Check instance-centric view - should only show active memberships (group2) - let instance_memberships: ResultsPage = - object_get(client, &instance_groups_url).await; - assert_eq!( - instance_memberships.items.len(), - 1, - "Instance should only show active membership (group2)" - ); - assert_eq!( - instance_memberships.items[0].multicast_group_id, - member2.multicast_group_id, - "Remaining membership should be group2" - ); - assert_eq!( - instance_memberships.items[0].state, "Joined", - "Group2 membership should be Joined" - ); - - // Check group-centric views - let group1_members = - list_multicast_group_members(&client, group1_name).await; - let group2_members = - list_multicast_group_members(&client, group2_name).await; - assert_eq!(group1_members.len(), 0, "Group1 should have no members"); - assert_eq!(group2_members.len(), 1, "Group2 should still have 1 member"); - - // Leave group2 using group-centric endpoint - let member_remove_url = format!( - "{}/{instance_name}?project={project_name}", - mcast_group_members_url(group2_name) - ); - - NexusRequest::new( - RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) - .expect_status(Some(StatusCode::NO_CONTENT)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - 
.await - .expect("Should remove member from group2"); - - // Wait for reconciler to process the removal - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // Verify all memberships are gone - let instance_memberships: ResultsPage = - object_get(client, &instance_groups_url).await; - assert_eq!( - instance_memberships.items.len(), - 0, - "Instance should have no memberships" - ); - - let group1_members = - list_multicast_group_members(&client, group1_name).await; - let group2_members = - list_multicast_group_members(&client, group2_name).await; - assert_eq!(group1_members.len(), 0); - assert_eq!(group2_members.len(), 0); - - // Clean up - let group1_delete_url = mcast_group_url(group1_name); - let group2_delete_url = mcast_group_url(group2_name); - - object_delete(client, &group1_delete_url).await; - object_delete(client, &group2_delete_url).await; -} - -#[nexus_test] -async fn test_multicast_group_member_errors( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "test-group"; - let nonexistent_instance = "nonexistent-instance"; - - // Create a project and multicast IP pool - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 6, 0, 10), - (224, 6, 0, 255), - ) - .await; - - // Create a multicast group - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Test group for error cases".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - object_create::<_, MulticastGroup>(client, &group_url, ¶ms).await; - - // Wait for group to become active before testing member operations - wait_for_group_active(&client, group_name).await; - - // Test 
adding nonexistent instance to group - let member_add_url = format!( - "{}?project={project_name}", - mcast_group_members_url(group_name) - ); - let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Name(nonexistent_instance.parse().unwrap()), - }; - object_create_error( - client, - &member_add_url, - &member_params, - StatusCode::NOT_FOUND, - ) - .await; - - // Test adding member to nonexistent group - let nonexistent_group = "nonexistent-group"; - let member_add_bad_group_url = format!( - "{}?project={project_name}", - mcast_group_members_url(nonexistent_group) - ); - object_create_error( - client, - &member_add_bad_group_url, - &member_params, - StatusCode::NOT_FOUND, - ) - .await; - - // Clean up - follow standard deletion pattern - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; -} - -#[nexus_test] -async fn test_lookup_multicast_group_by_ip( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "test-lookup-group"; - - // Create a project and multicast IP pool - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 7, 0, 10), - (224, 7, 0, 255), - ) - .await; - - // Create a multicast group with specific IP - use safe IP range - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 7, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for IP lookup test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - - // Wait for group to become active - follow working pattern - 
wait_for_group_active(&client, group_name).await; - - // Test lookup by IP - let lookup_url = - format!("/v1/system/multicast-groups/by-ip/{multicast_ip}"); - let found_group: MulticastGroup = object_get(client, &lookup_url).await; - assert_groups_eq(&created_group, &found_group); - - // Test lookup with nonexistent IP - let nonexistent_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 200)); - let lookup_bad_url = - format!("/v1/system/multicast-groups/by-ip/{nonexistent_ip}"); - - object_get_error(client, &lookup_bad_url, StatusCode::NOT_FOUND).await; - - // Clean up - follow standard deletion pattern - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; -} - -#[nexus_test] -async fn test_instance_deletion_removes_multicast_memberships( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "springfield-squidport"; // Use the same project name as instance helpers - let group_name = "instance-deletion-group"; - let instance_name = "deletion-test-instance"; - - // Setup: project, pools, group with unique IP range - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 9, 0, 10), - (224, 9, 0, 255), - ) - .await; - - // Create multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 9, 0, 50)); // Use IP from our range - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for instance deletion test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; + // Create an instance (starts automatically with 
create_instance helper) + let instance = create_instance(client, project_name, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); - // Wait for group to become active - wait_for_group_active(&client, group_name).await; + // Simulate and wait for instance to be fully running with sled_id assigned + let nexus = &cptestctx.server.server_context().nexus; + instance_simulate(nexus, &instance_id).await; + instance_wait_for_state(client, instance_id, InstanceState::Running).await; + wait_for_instance_sled_assignment(cptestctx, &instance_id).await; - // Create instance and add as member - let instance = create_instance(client, project_name, instance_name).await; - let member_add_url = format!( - "{}?project={project_name}", - mcast_group_members_url(group_name) + // Case: List instance multicast groups (should be empty initially) + let instance_groups_url = format!( + "/v1/instances/{instance_name}/multicast-groups?project={project_name}" + ); + let instance_memberships: ResultsPage = + object_get(client, &instance_groups_url).await; + assert_eq!( + instance_memberships.items.len(), + 0, + "Instance should have no multicast memberships initially" ); - let member_params = MulticastGroupMemberAdd { - instance: NameOrId::Name(instance_name.parse().unwrap()), - }; - object_create::<_, MulticastGroupMember>( - client, - &member_add_url, - &member_params, + // Case: Join group1 using instance-centric endpoint (implicitly creates group1) + let instance_join_group1_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{group1_name}?project={project_name}" + ); + let join_params = InstanceMulticastGroupJoin { source_ips: None }; + // Use PUT method and expect 201 Created (implicitly creating group1) + let member1: MulticastGroupMember = NexusRequest::new( + RequestBuilder::new( + client, + http::Method::PUT, + &instance_join_group1_url, + ) + .body(Some(&join_params)) + .expect_status(Some(StatusCode::CREATED)), ) - .await; + 
 .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(member1.instance_id, instance.identity.id); - // Wait for member to join + // Wait for group1 to become active after implicit creation + wait_for_group_active(client, group1_name).await; + + // Wait for member to become joined wait_for_member_state( cptestctx, - group_name, + group1_name, instance.identity.id, nexus_db_model::MulticastGroupMemberState::Joined, ) .await; - // Verify member was added - let members = list_multicast_group_members(&client, group_name).await; - assert_eq!(members.len(), 1, "Instance should be a member of the group"); - assert_eq!(members[0].instance_id, instance.identity.id); - - // Test: Instance deletion should clean up multicast memberships - // Use the helper function for proper instance deletion (handles Starting state) - cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; - - // Verify instance is gone - let instance_url = - format!("/v1/instances/{instance_name}?project={project_name}"); - - object_get_error(client, &instance_url, StatusCode::NOT_FOUND).await; - - // Critical test: Verify instance was automatically removed from multicast group - wait_for_member_count(&client, group_name, 0).await; + // Case: Verify membership shows up in both endpoints + // Check group-centric view + let group1_members = + list_multicast_group_members(&client, group1_name).await; + assert_eq!(group1_members.len(), 1); + assert_eq!(group1_members[0].instance_id, instance.identity.id); - // DPD Validation: Ensure dataplane members are cleaned up - let dpd_client = dpd_client(cptestctx); - let dpd_group = dpd_client.multicast_group_get(&multicast_ip).await - .expect("Multicast group should still exist in dataplane after instance deletion"); - validate_dpd_group_response( - &dpd_group, - &multicast_ip, - Some(0), // Should have 0 members after instance deletion - "external group after instance deletion", + // Check 
instance-centric view (test the list endpoint thoroughly) + let instance_memberships: ResultsPage = + object_get(client, &instance_groups_url).await; + assert_eq!( + instance_memberships.items.len(), + 1, + "Instance should have exactly 1 membership" ); + assert_eq!(instance_memberships.items[0].instance_id, instance.identity.id); + assert_eq!( + instance_memberships.items[0].multicast_group_id, + member1.multicast_group_id + ); + assert_eq!(instance_memberships.items[0].state, "Joined"); - // Verify group still exists (just no members) - let group_get_url = mcast_group_url(group_name); - let group_after_deletion: MulticastGroup = - object_get(client, &group_get_url).await; - assert_eq!(group_after_deletion.identity.id, created_group.identity.id); - - // Clean up - object_delete(client, &group_get_url).await; -} - -#[nexus_test] -async fn test_member_operations_via_rpw_reconciler( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "test-project"; - let group_name = "rpw-test-group"; - let instance_name = "rpw-test-instance"; - - // Setup: project, pools, group with unique IP range - create_project(&client, project_name).await; - create_default_ip_pool(&client).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool", - (224, 10, 0, 10), - (224, 10, 0, 255), - ) - .await; - - // Create multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 10, 0, 50)); // Use IP from our range - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for RPW member operations test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - - // Wait 
for group to become active - wait_for_group_active(&client, group_name).await; - - assert_eq!(created_group.multicast_ip, multicast_ip); - assert_eq!(created_group.identity.name, group_name); - - // Create instance - let instance = create_instance(client, project_name, instance_name).await; - - // Test: Add member via API (should use RPW pattern via reconciler) + // Join group2 using group-centric endpoint (implicitly creates group2, test both directions) let member_add_url = format!( "{}?project={project_name}", - mcast_group_members_url(group_name) + mcast_group_members_url(group2_name) ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - let added_member: MulticastGroupMember = + let member2: MulticastGroupMember = object_create(client, &member_add_url, &member_params).await; + assert_eq!(member2.instance_id, instance.identity.id); + + // Wait for group2 to become active after implicit creation + wait_for_group_active(client, group2_name).await; // Wait for member to become joined wait_for_member_state( cptestctx, - group_name, + group2_name, instance.identity.id, nexus_db_model::MulticastGroupMemberState::Joined, ) .await; - // Verify member was added and reached Joined state - let members = list_multicast_group_members(&client, group_name).await; - assert_eq!(members.len(), 1, "Member should be added to group"); - assert_eq!(members[0].instance_id, added_member.instance_id); - assert_eq!(members[0].state, "Joined", "Member should be in Joined state"); - - // DPD Validation: Check external group configuration - let dpd_client = dpd_client(cptestctx); - let dpd_group = dpd_client - .multicast_group_get(&multicast_ip) - .await - .expect("Multicast group should exist in dataplane after member join"); - validate_dpd_group_response( - &dpd_group, - &multicast_ip, - None, // Don't assert member count due to timing - "external group after member join", - ); - - // Test: Remove member via 
API (should use RPW pattern via reconciler) - let member_remove_url = format!( - "{}/{instance_name}?project={project_name}", - mcast_group_members_url(group_name) - ); - - NexusRequest::new( - RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) - .expect_status(Some(StatusCode::NO_CONTENT)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Should remove member from multicast group"); - - // Verify member was removed (wait for member count to reach 0) - wait_for_member_count(&client, group_name, 0).await; - - // DPD Validation: Check group has no members after removal - let dpd_group = dpd_client.multicast_group_get(&multicast_ip).await.expect( - "Multicast group should still exist in dataplane after member removal", - ); - validate_dpd_group_response( - &dpd_group, - &multicast_ip, - Some(0), // Should have 0 members after removal - "external group after member removal", + // Verify instance now belongs to both groups (comprehensive list test) + let instance_memberships: ResultsPage = + object_get(client, &instance_groups_url).await; + assert_eq!( + instance_memberships.items.len(), + 2, + "Instance should belong to both groups" ); - // Clean up - reconciler is automatically activated by deletion - let group_delete_url = mcast_group_url(group_name); - object_delete(client, &group_delete_url).await; -} - -/// Test comprehensive multicast group update operations including the update saga. -/// Tests both description-only updates (no saga) and name updates (requires saga). 
-#[nexus_test] -async fn test_multicast_group_comprehensive_updates( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "update-test-project"; - let original_name = "original-group"; - let updated_name = "updated-group"; - let final_name = "final-group"; - let original_description = "Original description"; - let updated_description = "Updated description"; - let final_description = "Final description"; - - // Create project and IP pool - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "update-test-pool", - (224, 11, 0, 10), - (224, 11, 0, 255), - ) - .await; - - // Create multicast group - let group_url = "/v1/multicast-groups".to_string(); - let create_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(original_name).parse().unwrap(), - description: String::from(original_description), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, &create_params).await; - - wait_for_group_active(client, original_name).await; - - let original_group_url = mcast_group_url(original_name); - - // Description-only update (no saga required) - let description_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, // Keep same name - description: Some(String::from(updated_description)), - }, - source_ips: None, - mvlan: None, - }; - - let desc_updated_group: MulticastGroup = - object_put(client, &original_group_url, &description_update).await; - - // No wait needed for description-only updates - assert_eq!(desc_updated_group.identity.name, original_name); - assert_eq!(desc_updated_group.identity.description, updated_description); - assert_eq!(desc_updated_group.identity.id, created_group.identity.id); + // Verify the list endpoint 
returns the correct membership details + let membership_group_ids: Vec<_> = instance_memberships + .items + .iter() + .map(|m| m.multicast_group_id) + .collect(); assert!( - desc_updated_group.identity.time_modified - > created_group.identity.time_modified + membership_group_ids.contains(&member1.multicast_group_id), + "List should include group1 membership" ); - - // Name-only update (requires update saga) - let name_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some(String::from(updated_name).parse().unwrap()), - description: None, // Keep current description - }, - source_ips: None, - mvlan: None, - }; - - let name_updated_group: MulticastGroup = - object_put(client, &original_group_url, &name_update).await; - - // Wait for update saga to complete DPD configuration application - // Name updates don't change DPD state, just verify saga completed without errors - wait_for_group_dpd_update( - cptestctx, - &created_group.multicast_ip, - dpd_predicates::expect_external_group(), - "name update saga completed", - ) - .await; - - // Verify name update worked - assert_eq!(name_updated_group.identity.name, updated_name); - assert_eq!(name_updated_group.identity.description, updated_description); // Should keep previous description - assert_eq!(name_updated_group.identity.id, created_group.identity.id); assert!( - name_updated_group.identity.time_modified - > desc_updated_group.identity.time_modified + membership_group_ids.contains(&member2.multicast_group_id), + "List should include group2 membership" ); - // Verify we can access with new name - let updated_group_url = mcast_group_url(updated_name); - let fetched_group: MulticastGroup = - object_get(client, &updated_group_url).await; - assert_eq!(fetched_group.identity.name, updated_name); - - // Verify old name is no longer accessible - object_get_error(client, &original_group_url, StatusCode::NOT_FOUND).await; + // Verify all memberships show correct instance_id and state + for 
membership in &instance_memberships.items { + assert_eq!(membership.instance_id, instance.identity.id); + assert_eq!(membership.state, "Joined"); + } - // Combined name and description update (requires saga) - let combined_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: Some(String::from(final_name).parse().unwrap()), - description: Some(String::from(final_description)), - }, - source_ips: None, - mvlan: None, - }; + // Verify each group shows the instance as a member + let group1_members = + list_multicast_group_members(&client, group1_name).await; + let group2_members = + list_multicast_group_members(&client, group2_name).await; + assert_eq!(group1_members.len(), 1); + assert_eq!(group2_members.len(), 1); + assert_eq!(group1_members[0].instance_id, instance.identity.id); + assert_eq!(group2_members[0].instance_id, instance.identity.id); - let final_updated_group: MulticastGroup = - object_put(client, &updated_group_url, &combined_update).await; + // Leave group1 using instance-centric endpoint + let instance_leave_group1_url = format!( + "/v1/instances/{instance_name}/multicast-groups/{group1_name}?project={project_name}" + ); + object_delete(client, &instance_leave_group1_url).await; - // Wait for update saga to complete - // Combined name+description updates don't change DPD state - wait_for_group_dpd_update( - cptestctx, - &created_group.multicast_ip, - dpd_predicates::expect_external_group(), - "combined name+description update saga completed", - ) - .await; + // Implicit deletion model: group1 should be deleted after last member leaves + wait_for_group_deleted(client, group1_name).await; - // Verify combined update worked - assert_eq!(final_updated_group.identity.name, final_name); - assert_eq!(final_updated_group.identity.description, final_description); - assert_eq!(final_updated_group.identity.id, created_group.identity.id); - assert!( - final_updated_group.identity.time_modified - > 
name_updated_group.identity.time_modified + // Verify membership removed from both views + // Check instance-centric view - should only show active memberships (group2) + let instance_memberships: ResultsPage = + object_get(client, &instance_groups_url).await; + assert_eq!( + instance_memberships.items.len(), + 1, + "Instance should only show active membership (group2)" + ); + assert_eq!( + instance_memberships.items[0].multicast_group_id, + member2.multicast_group_id, + "Remaining membership should be group2" + ); + assert_eq!( + instance_memberships.items[0].state, "Joined", + "Group2 membership should be Joined" ); - // Verify group remains active through updates - let final_group_url = mcast_group_url(final_name); - wait_for_group_active(client, final_name).await; - - // DPD validation - let dpd_client = dpd_client(cptestctx); - match dpd_client - .multicast_group_get(&final_updated_group.multicast_ip) - .await - { - Ok(dpd_group) => { - let group_data = dpd_group.into_inner(); - let tag = match &group_data { - dpd_types::MulticastGroupResponse::External { tag, .. } => { - tag.as_deref() - } - dpd_types::MulticastGroupResponse::Underlay { tag, .. 
} => { - tag.as_deref() - } - }; - assert_eq!( - tag, - Some(final_name), - "DPD group tag should match final group name" - ); - } - Err(DpdError::ErrorResponse(resp)) - if resp.status() == reqwest::StatusCode::NOT_FOUND => {} - Err(_) => {} - } + // Check group2 still has the member (group1 is already deleted) + let group2_members = + list_multicast_group_members(&client, group2_name).await; + assert_eq!(group2_members.len(), 1, "Group2 should still have 1 member"); - // Clean up - object_delete(client, &final_group_url).await; -} + // Leave group2 using group-centric endpoint + let member_remove_url = format!( + "{}/{instance_name}?project={project_name}", + mcast_group_members_url(group2_name) + ); -/// Validate DPD multicast group response with comprehensive checks -fn validate_dpd_group_response( - dpd_group: &dpd_types::MulticastGroupResponse, - expected_ip: &IpAddr, - expected_member_count: Option, - test_context: &str, -) { - // Basic validation using our utility function - let ip = match dpd_group { - dpd_types::MulticastGroupResponse::External { group_ip, .. } => { - *group_ip - } - dpd_types::MulticastGroupResponse::Underlay { group_ip, .. } => { - IpAddr::V6(group_ip.0) - } - }; - assert_eq!(ip, *expected_ip, "DPD group IP mismatch in {test_context}"); - - match dpd_group { - dpd_types::MulticastGroupResponse::External { - external_group_id, - .. 
- } => { - if let Some(_expected_count) = expected_member_count { - // External groups typically don't have direct members, - // but we can validate if they do - // Note: External groups may not expose member count directly - eprintln!( - "Note: External group member validation skipped in {test_context}" - ); - } + NexusRequest::new( + RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should remove member from group2"); - // Validate external group specific fields - assert_ne!( - *external_group_id, 0, - "DPD external_group_id should be non-zero in {test_context}" - ); - } - dpd_types::MulticastGroupResponse::Underlay { - members, - external_group_id, - underlay_group_id, - .. - } => { - if let Some(expected_count) = expected_member_count { - assert_eq!( - members.len(), - expected_count, - "DPD underlay group member count mismatch in {test_context}: expected {expected_count}, got {}", - members.len() - ); - } + // Wait for reconciler to process the removal + wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - // Assert all underlay members use rear (backplane) ports with Underlay direction - for member in members { - assert!( - matches!( - member.port_id, - dpd_client::types::PortId::Rear(_) - ), - "Underlay member should use rear (backplane) port, got: {:?}", - member.port_id - ); - assert_eq!( - member.direction, - dpd_client::types::Direction::Underlay, - "Underlay member should have Underlay direction" - ); - } + // Verify all memberships are gone + let instance_memberships: ResultsPage = + object_get(client, &instance_groups_url).await; + assert_eq!( + instance_memberships.items.len(), + 0, + "Instance should have no memberships" + ); - // Validate underlay group specific fields - assert_ne!( - *external_group_id, 0, - "DPD external_group_id should be non-zero in {test_context}" - ); - assert_ne!( - 
*underlay_group_id, 0, - "DPD underlay_group_id should be non-zero in {test_context}" - ); - } - } + // Implicit deletion model: Groups should be implicitly deleted after last member removed + ops::join2( + wait_for_group_deleted(client, group1_name), + wait_for_group_deleted(client, group2_name), + ) + .await; } -/// Test source_ips updates and multicast group validation. -/// Verifies proper ASM/SSM handling, validation of invalid transitions, and mixed pool allocation. #[nexus_test] -async fn test_multicast_source_ips_update(cptestctx: &ControlPlaneTestContext) { +async fn test_multicast_group_member_errors( + cptestctx: &ControlPlaneTestContext, +) { let client = &cptestctx.external_client; - let project_name = "source-update-project"; - - // Create project and separate ASM and SSM pools - create_project(&client, project_name).await; - - // Create ASM pool for ASM testing - let asm_pool = create_multicast_ip_pool_with_range( - &client, - "asm-update-pool", - (224, 99, 0, 10), - (224, 99, 0, 50), - ) - .await; + let project_name = "test-project"; + let group_name = "test-group"; + let nonexistent_instance = "nonexistent-instance"; - // Create SSM pool for SSM testing - let ssm_pool = create_multicast_ip_pool_with_range( - &client, - "ssm-update-pool", - (232, 99, 0, 10), - (232, 99, 0, 50), + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 6, 0, 10), + (224, 6, 0, 255), + ), ) .await; - let group_url = "/v1/multicast-groups".to_string(); - - // Negative: creating in SSM pool without sources should be rejected - let ssm_no_sources = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "ssm-no-sources".parse().unwrap(), - description: "should fail: SSM pool requires sources".to_string(), - }, - multicast_ip: None, // implicit allocation - source_ips: None, // missing 
sources in SSM pool - pool: Some(NameOrId::Name(ssm_pool.identity.name.clone())), - mvlan: None, - }; - object_create_error( - client, - &group_url, - &ssm_no_sources, - StatusCode::BAD_REQUEST, - ) - .await; + // Implicitly create a multicast group by adding an instance as first member + let instance_name = "test-instance"; + create_instance(client, project_name, instance_name).await; - // Negative: creating in ASM pool with sources (implicit IP) should be rejected - let asm_with_sources = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "asm-with-sources".parse().unwrap(), - description: - "should fail: ASM pool cannot allocate SSM with sources" - .to_string(), - }, - multicast_ip: None, // implicit allocation - source_ips: Some(vec!["10.10.10.10".parse().unwrap()]), // sources present - pool: Some(NameOrId::Name(asm_pool.identity.name.clone())), - mvlan: None, - }; - let err2: HttpErrorResponseBody = object_create_error( - client, - &group_url, - &asm_with_sources, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - err2.message - .contains("Cannot allocate SSM multicast group from ASM pool"), - "Expected ASM pool + sources to be rejected, got: {}", - err2.message + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) ); - - // Create ASM group (no sources) - let asm_group_name = "asm-group"; - let asm_create_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(asm_group_name).parse().unwrap(), - description: "ASM group for testing".to_string(), - }, - multicast_ip: None, - source_ips: None, // No sources = ASM - pool: Some(NameOrId::Name(asm_pool.identity.name.clone())), - mvlan: None, + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - - let asm_group = object_create::<_, MulticastGroup>( + object_create::<_, MulticastGroupMember>( client, - &group_url, - 
&asm_create_params, + &member_add_url, + &member_params, ) .await; - wait_for_group_active(client, asm_group_name).await; - - // Verify ASM group allocation (should get any available multicast address) - assert!( - asm_group.source_ips.is_empty(), - "ASM group should have no sources" - ); - // ASM group updates (valid operations) + // Wait for group to become active before testing error cases + wait_for_group_active(&client, group_name).await; - // Description-only update (always valid) - let description_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("Updated ASM description".to_string()), - }, + // Test adding nonexistent instance to group + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(nonexistent_instance.parse().unwrap()), source_ips: None, - mvlan: None, - }; - let updated_asm: MulticastGroup = object_put( - client, - &mcast_group_url(asm_group_name), - &description_update, - ) - .await; - assert_eq!(updated_asm.identity.description, "Updated ASM description"); - assert!(updated_asm.source_ips.is_empty()); - - // Try invalid ASM→SSM transition (should be rejected) - let invalid_ssm_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec!["10.1.1.1".parse().unwrap()]), // Try to add sources - mvlan: None, }; - - object_put_error( + object_create_error( client, - &mcast_group_url(asm_group_name), - &invalid_ssm_update, - StatusCode::BAD_REQUEST, + &member_add_url, + &member_params, + StatusCode::NOT_FOUND, ) .await; - // Create SSM group from scratch (with explicit SSM IP and sources) - let ssm_group_name = "ssm-group"; - let ssm_create_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(ssm_group_name).parse().unwrap(), - description: "SSM group 
with explicit SSM address".to_string(), - }, - multicast_ip: Some("232.99.0.20".parse().unwrap()), // Explicit SSM IP required - source_ips: Some(vec!["10.2.2.2".parse().unwrap()]), // SSM sources from start - pool: Some(NameOrId::Name(ssm_pool.identity.name.clone())), - mvlan: None, - }; - - let ssm_group = object_create::<_, MulticastGroup>( - client, - &group_url, - &ssm_create_params, - ) - .await; - wait_for_group_active(client, ssm_group_name).await; - - // Verify SSM group has correct explicit IP and sources - assert_eq!(ssm_group.multicast_ip.to_string(), "232.99.0.20"); - assert_eq!(ssm_group.source_ips.len(), 1); - assert_eq!(ssm_group.source_ips[0].to_string(), "10.2.2.2"); - - // Create SSM group with mvlan at creation time - let ssm_with_mvlan_name = "ssm-group-with-mvlan"; - let ssm_with_mvlan_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(ssm_with_mvlan_name).parse().unwrap(), - description: "SSM group created with mvlan".to_string(), - }, - multicast_ip: Some("232.99.0.30".parse().unwrap()), - source_ips: Some(vec!["10.7.7.7".parse().unwrap()]), - pool: Some(NameOrId::Name(ssm_pool.identity.name.clone())), - mvlan: Some(VlanID::new(2048).unwrap()), // Create with mvlan - }; - let ssm_with_mvlan_created = object_create::<_, MulticastGroup>( + // Test adding member to nonexistent group + let nonexistent_group = "nonexistent-group"; + let member_add_bad_group_url = format!( + "{}?project={project_name}", + mcast_group_members_url(nonexistent_group) + ); + object_create_error( client, - &group_url, - &ssm_with_mvlan_params, + &member_add_bad_group_url, + &member_params, + StatusCode::NOT_FOUND, ) .await; - wait_for_group_active(client, ssm_with_mvlan_name).await; - assert_eq!(ssm_with_mvlan_created.multicast_ip.to_string(), "232.99.0.30"); - assert_eq!(ssm_with_mvlan_created.source_ips.len(), 1); - assert_eq!( - ssm_with_mvlan_created.mvlan, - Some(VlanID::new(2048).unwrap()), - "SSM group should be 
created with mvlan" - ); - - // Valid SSM group updates + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, group_name).await; +} - // Update SSM sources (valid - SSM→SSM) - let ssm_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec![ - "10.3.3.3".parse().unwrap(), - "10.3.3.4".parse().unwrap(), - ]), - mvlan: None, - }; - let updated_ssm: MulticastGroup = - object_put(client, &mcast_group_url(ssm_group_name), &ssm_update).await; +#[nexus_test] +async fn test_lookup_multicast_group_by_ip( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "test-project"; + let group_name = "test-lookup-group"; - // Wait for update saga to complete - wait_for_group_dpd_update( - cptestctx, - &updated_ssm.multicast_ip, - dpd_predicates::expect_external_group(), - "source_ips update saga completed", + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 7, 0, 10), + (224, 7, 0, 255), + ), ) .await; - assert_eq!(updated_ssm.source_ips.len(), 2); - let source_strings: std::collections::HashSet = - updated_ssm.source_ips.iter().map(|ip| ip.to_string()).collect(); - assert!(source_strings.contains("10.3.3.3")); - assert!(source_strings.contains("10.3.3.4")); - - // Valid SSM source reduction (but must maintain at least one source) - let ssm_source_reduction = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec!["10.3.3.3".parse().unwrap()]), // Reduce to one source - mvlan: None, + // Implicitly create multicast group by adding an instance as first member + let instance_name = "lookup-test-instance"; + create_instance(client, project_name, 
instance_name).await; + + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - let reduced_ssm: MulticastGroup = object_put( + object_create::<_, MulticastGroupMember>( client, - &mcast_group_url(ssm_group_name), - &ssm_source_reduction, + &member_add_url, + &member_params, ) .await; - // Wait for source reduction saga to complete - wait_for_group_dpd_update( - cptestctx, - &reduced_ssm.multicast_ip, - dpd_predicates::expect_external_group(), - "source_ips reduction saga completed", - ) - .await; + // Wait for group to become active + wait_for_group_active(&client, group_name).await; - assert_eq!( - reduced_ssm.source_ips.len(), - 1, - "SSM group should have exactly one source after reduction" - ); - assert_eq!(reduced_ssm.source_ips[0].to_string(), "10.3.3.3"); + // Get the group to find its auto-allocated IP address + let created_group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + let multicast_ip = created_group.multicast_ip; - // Test SSM group with mvlan (combined features) - let ssm_update_with_mvlan = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec![ - "10.4.4.4".parse().unwrap(), - "10.4.4.5".parse().unwrap(), - ]), - mvlan: Some(Nullable(Some(VlanID::new(2500).unwrap()))), // Set mvlan on SSM group - }; - let ssm_with_mvlan: MulticastGroup = object_put( - client, - &mcast_group_url(ssm_group_name), - &ssm_update_with_mvlan, - ) - .await; + // Test lookup by IP (using the auto-allocated IP) via the main endpoint + // The main multicast-groups endpoint now accepts Name, ID, or IP + let lookup_url = format!("/v1/multicast-groups/{multicast_ip}"); + let found_group: MulticastGroup = object_get(client, &lookup_url).await; + assert_groups_eq(&created_group, 
&found_group); - // Wait for combined source_ips+mvlan update saga to complete - // Must verify vlan_id was applied to DPD - wait_for_group_dpd_update( - cptestctx, - &ssm_with_mvlan.multicast_ip, - dpd_predicates::expect_vlan_id(2500), - "source_ips+mvlan update saga completed, vlan_id=2500", - ) - .await; + // Test lookup with nonexistent IP + let nonexistent_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 200)); + let lookup_bad_url = format!("/v1/multicast-groups/{nonexistent_ip}"); - assert_eq!(ssm_with_mvlan.source_ips.len(), 2); - assert_eq!( - ssm_with_mvlan.mvlan, - Some(VlanID::new(2500).unwrap()), - "SSM group should support mvlan" - ); + object_get_error(client, &lookup_bad_url, StatusCode::NOT_FOUND).await; - // Update mvlan while keeping sources - let update_mvlan_only = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, // Don't change sources - mvlan: Some(Nullable(Some(VlanID::new(3000).unwrap()))), - }; - let mvlan_updated: MulticastGroup = object_put( - client, - &mcast_group_url(ssm_group_name), - &update_mvlan_only, - ) - .await; - assert_eq!(mvlan_updated.mvlan, Some(VlanID::new(3000).unwrap())); - assert_eq!( - mvlan_updated.source_ips.len(), - 2, - "Sources should be unchanged" - ); + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, group_name).await; +} - // Clear mvlan while updating sources - let clear_mvlan_update_sources = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: Some(vec!["10.5.5.5".parse().unwrap()]), - mvlan: Some(Nullable(None)), // Clear mvlan - }; - let mvlan_cleared: MulticastGroup = object_put( - client, - &mcast_group_url(ssm_group_name), - &clear_mvlan_update_sources, +#[nexus_test] +async fn test_instance_deletion_removes_multicast_memberships( + cptestctx: &ControlPlaneTestContext, +) { + let client = 
&cptestctx.external_client; + let project_name = "springfield-squidport"; // Use the same project name as instance helpers + let group_name = "instance-deletion-group"; + let instance_name = "deletion-test-instance"; + + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 9, 0, 10), + (224, 9, 0, 255), + ), ) .await; - assert_eq!(mvlan_cleared.mvlan, None, "MVLAN should be cleared"); - assert_eq!(mvlan_cleared.source_ips.len(), 1); - assert_eq!(mvlan_cleared.source_ips[0].to_string(), "10.5.5.5"); - - // Create SSM group that requires proper address validation - let ssm_explicit_name = "ssm-explicit"; - let ssm_explicit_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(ssm_explicit_name).parse().unwrap(), - description: "SSM group with explicit 232.x.x.x IP".to_string(), - }, - multicast_ip: Some("232.99.0.42".parse().unwrap()), // Explicit SSM IP - source_ips: Some(vec!["10.5.5.5".parse().unwrap()]), - pool: Some(NameOrId::Name(ssm_pool.identity.name.clone())), - mvlan: None, + + // Implicitly create multicast group by adding instance as first member + let instance = create_instance(client, project_name, instance_name).await; + let member_add_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; - let ssm_explicit = object_create::<_, MulticastGroup>( + object_create::<_, MulticastGroupMember>( client, - &group_url, - &ssm_explicit_params, + &member_add_url, + &member_params, ) .await; - wait_for_group_active(client, ssm_explicit_name).await; - assert_eq!(ssm_explicit.multicast_ip.to_string(), "232.99.0.42"); - assert_eq!(ssm_explicit.source_ips.len(), 1); + // Wait for group to become 
active after implicit creation + wait_for_group_active(&client, group_name).await; - // Try creating SSM group with invalid IP (should be rejected) - let invalid_ssm_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "invalid-ssm".parse().unwrap(), - description: "Should be rejected".to_string(), - }, - multicast_ip: Some("224.99.0.42".parse().unwrap()), // ASM IP with sources - source_ips: Some(vec!["10.6.6.6".parse().unwrap()]), // Sources with ASM IP - pool: Some(NameOrId::Name(ssm_pool.identity.name.clone())), - mvlan: None, - }; + // Get the group to find its auto-allocated IP address (needed for DPD check) + let created_group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + let multicast_ip = created_group.multicast_ip; - object_create_error( - client, - &group_url, - &invalid_ssm_params, - StatusCode::BAD_REQUEST, + // Wait for member to join + wait_for_member_state( + cptestctx, + group_name, + instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Joined, ) .await; - // Clean up all groups - for group_name in [asm_group_name, ssm_group_name, ssm_explicit_name] { - let delete_url = mcast_group_url(group_name); - object_delete(client, &delete_url).await; - } + // Verify member was added + let members = list_multicast_group_members(&client, group_name).await; + assert_eq!(members.len(), 1, "Instance should be a member of the group"); + assert_eq!(members[0].instance_id, instance.identity.id); + + // Case: Instance deletion should clean up multicast memberships + // Use the helper function for proper instance deletion (handles Starting state) + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + + // Verify instance is gone + let instance_url = + format!("/v1/instances/{instance_name}?project={project_name}"); + + object_get_error(client, &instance_url, StatusCode::NOT_FOUND).await; + + // Implicit model: group is implicitly deleted when last member (instance) is 
removed + wait_for_group_deleted(client, group_name).await; + + // DPD Validation: Ensure dataplane group is also cleaned up (implicit model) + let dpd_client = dpd_client(cptestctx); + let dpd_result = dpd_client.multicast_group_get(&multicast_ip).await; + assert!( + dpd_result.is_err(), + "Multicast group should be deleted from dataplane after last member removed (implicit model)" + ); } +/// Test that the multicast_ip field is correctly populated in MulticastGroupMember API responses. +/// This validates the denormalized multicast_ip field added for API ergonomics. #[nexus_test] -async fn test_multicast_group_with_mvlan(cptestctx: &ControlPlaneTestContext) { +async fn test_member_response_includes_multicast_ip( + cptestctx: &ControlPlaneTestContext, +) { let client = &cptestctx.external_client; - let project_name = "mvlan-test-project"; - let group_name = "mvlan-test-group"; + let project_name = "multicast-ip-test"; + let group_name = "test-group"; + let instance_name = "test-instance"; - // Setup - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-pool", - (224, 50, 0, 10), - (224, 50, 0, 255), + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "test-pool", + (224, 30, 0, 1), + (224, 30, 0, 10), + ), ) .await; - let group_url = "/v1/multicast-groups".to_string(); + // Create instance for implicit group creation + create_instance(client, project_name, instance_name).await; - // Test creating group with mvlan - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group with MVLAN for external uplink forwarding" - .to_string(), - }, - multicast_ip: None, + // Implicitly create group via member-add + let member_add_url = format!( + 
"{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(100).unwrap()), // Set MVLAN to 100 }; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; + // Add member and verify multicast_ip field is present in response + let added_member: MulticastGroupMember = + object_create(client, &member_add_url, &member_params).await; + // Wait for group to become active wait_for_group_active(client, group_name).await; - // Verify mvlan was set correctly + // Get the group to verify its multicast_ip + let group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + + // Verify multicast_ip field is present in member response assert_eq!( - created_group.mvlan, - Some(VlanID::new(100).unwrap()), - "MVLAN should be set to 100" + added_member.multicast_ip, group.multicast_ip, + "MulticastGroupMember API response should include multicast_ip field that matches the group's IP" ); - assert_eq!(created_group.identity.name, group_name); - // Verify we can fetch it and mvlan persists - let fetched_group_url = mcast_group_url(group_name); - let fetched_group: MulticastGroup = - object_get(client, &fetched_group_url).await; + // Verify multicast_ip is in expected range from the pool + let member_ip_str = added_member.multicast_ip.to_string(); + assert!( + member_ip_str.starts_with("224.30.0."), + "Member multicast_ip should be allocated from the pool range, got: {member_ip_str}" + ); + + // Case: List members and verify multicast_ip in all responses + let members_list_url = format!( + "{}?project={project_name}", + mcast_group_members_url(group_name) + ); + let members: ResultsPage = + object_get(client, &members_list_url).await; + + assert_eq!(members.items.len(), 1, "Should have exactly one member"); 
assert_eq!( - fetched_group.mvlan, - Some(VlanID::new(100).unwrap()), - "MVLAN should persist after fetch" + members.items[0].multicast_ip, group.multicast_ip, + "Listed member should also include multicast_ip field" ); - // DPD Validation: Verify mvlan is propagated to dataplane as vlan_id - let dpd_client = dpd_client(cptestctx); - let dpd_group = dpd_client - .multicast_group_get(&created_group.multicast_ip) - .await - .expect("Multicast group should exist in dataplane"); - - // Extract vlan_id from DPD response and verify it matches mvlan - match dpd_group.into_inner() { - dpd_types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { - assert_eq!( - external_forwarding.vlan_id, - Some(100), - "DPD external_forwarding.vlan_id should match group mvlan" - ); - } - dpd_types::MulticastGroupResponse::Underlay { .. } => { - panic!("Expected external group, got underlay group"); - } - } + // Case: Remove and re-add member (reactivation) - verify field preserved + let member_remove_url = format!( + "{}/{instance_name}?project={project_name}", + mcast_group_members_url(group_name) + ); + NexusRequest::new( + RequestBuilder::new(client, http::Method::DELETE, &member_remove_url) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should remove member"); + + wait_for_group_deleted(client, group_name).await; + + // Re-create group by adding member again + let readded_member: MulticastGroupMember = + object_create(client, &member_add_url, &member_params).await; + + wait_for_group_active(client, group_name).await; + + let new_group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + + // Verify multicast_ip field is present in re-added member + assert_eq!( + readded_member.multicast_ip, new_group.multicast_ip, + "Re-added member should also have multicast_ip field" + ); + + NexusRequest::new( + RequestBuilder::new(client, http::Method::DELETE, 
&member_remove_url) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should remove member for cleanup"); - // Clean up - object_delete(client, &fetched_group_url).await; wait_for_group_deleted(client, group_name).await; } +/// Test that we cannot delete a multicast IP pool when multicast groups are +/// linked to it (allocated IPs from it). +/// +/// With implicit groups: +/// - Groups implicitly create when instances join +/// - Groups hold IPs from pools while they exist +/// - Pool should be protected while groups exist +/// - After groups are implicitly deleted (last member leaves), pool can be deleted #[nexus_test] -async fn test_multicast_group_mvlan_updates( +async fn test_cannot_delete_multicast_pool_with_groups( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let project_name = "mvlan-update-project"; - let group_name = "mvlan-update-group"; + let project_name = "test-project"; + let pool_name = "mcast-pool-delete-test"; + let group_name = "mcast-group-blocks-delete"; + let instance_name = "pool-test-instance"; - // Setup - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-update-pool", - (224, 51, 0, 10), - (224, 51, 0, 255), + // Create project and IP pools in parallel + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + client, + pool_name, + (224, 10, 0, 1), + (224, 10, 0, 10), + ), ) .await; - let group_url = "/v1/multicast-groups".to_string(); + let pool_url = format!("/v1/system/ip-pools/{pool_name}"); + let range_url = format!("/v1/system/ip-pools/{pool_name}/ranges/remove"); - // Create group without mvlan - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for MVLAN update 
testing".to_string(), - }, - multicast_ip: None, + let range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 10, 0, 1), + std::net::Ipv4Addr::new(224, 10, 0, 10), + ) + .unwrap(), + ); + + // Verify we can't delete the pool while it has ranges + let error: HttpErrorResponseBody = + object_delete_error(client, &pool_url, StatusCode::BAD_REQUEST).await; + assert_eq!( + error.message, + "IP Pool cannot be deleted while it contains IP ranges" + ); + + // Create instance and implicitly create group via member-add (implicit pattern) + create_instance(client, project_name, instance_name).await; + + let member_add_url = format!( + "/v1/multicast-groups/{}/members?project={}", + group_name, project_name + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, // Start without MVLAN }; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + // Wait for group to become active wait_for_group_active(client, group_name).await; - assert_eq!(created_group.mvlan, None, "MVLAN should initially be None"); - - let group_update_url = mcast_group_url(group_name); - - // Set mvlan to a value - let set_mvlan_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(Some(VlanID::new(200).unwrap()))), // Set to 200 - }; + // Verify we can't delete the range while groups are allocated from it + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &range_url) + .body(Some(&range)) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!( + 
error.message, + "IP pool ranges cannot be deleted while multicast groups are allocated from them" + ); - let updated_group: MulticastGroup = - object_put(client, &group_update_url, &set_mvlan_update).await; + // Verify we still can't delete the pool (indirectly protected by ranges) + let error: HttpErrorResponseBody = + object_delete_error(client, &pool_url, StatusCode::BAD_REQUEST).await; assert_eq!( - updated_group.mvlan, - Some(VlanID::new(200).unwrap()), - "MVLAN should be set to 200" + error.message, + "IP Pool cannot be deleted while it contains IP ranges" ); - // Change mvlan to a different value - let change_mvlan_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(Some(VlanID::new(300).unwrap()))), // Change to 300 - }; + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, group_name).await; - let changed_group: MulticastGroup = - object_put(client, &group_update_url, &change_mvlan_update).await; - assert_eq!( - changed_group.mvlan, - Some(VlanID::new(300).unwrap()), - "MVLAN should be changed to 300" + // Now we should be able to delete the range + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &range_url) + .body(Some(&range)) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect( + "Should be able to delete range after groups are implicitly deleted", ); - // Clear mvlan back to None - let clear_mvlan_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(None)), // Clear to NULL - }; + // And now we should be able to delete the pool + NexusRequest::object_delete(client, &pool_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Should be able to delete pool after ranges are deleted"); 
+} - let cleared_group: MulticastGroup = - object_put(client, &group_update_url, &clear_mvlan_update).await; - assert_eq!(cleared_group.mvlan, None, "MVLAN should be cleared to None"); +/// Assert that two multicast groups are equal in all fields. +fn assert_groups_eq(left: &MulticastGroup, right: &MulticastGroup) { + assert_eq!(left.identity.id, right.identity.id); + assert_eq!(left.identity.name, right.identity.name); + assert_eq!(left.identity.description, right.identity.description); + assert_eq!(left.multicast_ip, right.multicast_ip); + assert_eq!(left.source_ips, right.source_ips); + assert_eq!(left.mvlan, right.mvlan); + assert_eq!(left.ip_pool_id, right.ip_pool_id); +} - // Set mvlan again, then test omitting the field preserves existing value - let set_mvlan_200 = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(Some(VlanID::new(200).unwrap()))), - }; +/// Test that source IPs are validated when joining a multicast group. +/// +/// Source IPs enable Source-Specific Multicast (SSM) where traffic is filtered +/// by both destination (multicast IP) and source addresses. 
+#[nexus_test] +async fn test_source_ip_validation_on_join( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "source-ip-validation-project"; + let group_name = "source-ip-test-group"; + let instance_name = "source-ip-test-instance"; - let group_with_200: MulticastGroup = - object_put(client, &group_update_url, &set_mvlan_200).await; - assert_eq!( - group_with_200.mvlan, - Some(VlanID::new(200).unwrap()), - "MVLAN should be set to 200" + // Create project and IP pools in parallel + // SSM groups require an SSM pool (232.x.x.x range for IPv4) + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "source-ip-mcast-pool", + (232, 1, 0, 1), + (232, 1, 0, 255), + ), + ) + .await; + + // Create instances + create_instance(client, project_name, instance_name).await; + let instance2_name = "source-ip-test-instance-2"; + create_instance(client, project_name, instance2_name).await; + let instance3_name = "source-ip-test-instance-3"; + create_instance(client, project_name, instance3_name).await; + + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" ); + let valid_source = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)); - // Omit mvlan field entirely - should preserve existing value (200) - let omit_mvlan_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("Updated description".to_string()), - }, - source_ips: None, - mvlan: None, // Omit the field + // Case: Valid unicast source IP - creates SSM group + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: Some(vec![valid_source]), }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; - let unchanged_group: MulticastGroup = - object_put(client, 
&group_update_url, &omit_mvlan_update).await; - assert_eq!( - unchanged_group.mvlan, - Some(VlanID::new(200).unwrap()), - "MVLAN should remain at 200 when field is omitted" - ); - assert_eq!( - unchanged_group.identity.description, "Updated description", - "Description should be updated" - ); + let group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + assert_eq!(group.source_ips, vec![valid_source]); - // Test invalid mvlan during update (reserved value 1) - let invalid_mvlan_update = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(Some(VlanID::new(1).unwrap()))), // Reserved value + // Case: Second instance joining with same source IPs - should succeed + let member_params2 = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance2_name.parse().unwrap()), + source_ips: Some(vec![valid_source]), }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params2, + ) + .await; - object_put_error( + // Case: Third instance joining with different source IPs - should fail + let different_source = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 99)); + let member_params3 = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance3_name.parse().unwrap()), + source_ips: Some(vec![different_source]), + }; + let error = object_create_error( client, - &group_update_url, - &invalid_mvlan_update, + &member_add_url, + &member_params3, StatusCode::BAD_REQUEST, ) .await; + assert!( + error.message.contains("source"), + "Error should mention source IPs mismatch: {}", + error.message + ); - // Clean up - object_delete(client, &group_update_url).await; + // Cleanup + cleanup_instances( + cptestctx, + client, + project_name, + &[instance_name, instance2_name, instance3_name], + ) + .await; wait_for_group_deleted(client, group_name).await; } +/// Test default pool behavior when no pool is specified on member join. 
+/// +/// When a member joins a group without specifying a pool: +/// - If a default multicast pool exists, use it +/// - If no default pool, fail with appropriate error #[nexus_test] -async fn test_multicast_group_mvlan_validation( +async fn test_default_pool_on_implicit_creation( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let project_name = "mvlan-validation-project"; - - // Setup - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-validation-pool", - (224, 52, 0, 10), - (224, 52, 0, 255), - ) - .await; - - let group_url = "/v1/multicast-groups".to_string(); - - // Test valid MVLAN values (2-4094) - // Note: VLANs 0 and 1 are reserved and rejected by Dendrite (>= 2 required) - // VLAN 4095 is reserved per IEEE 802.1Q and rejected by VlanID type (max 4094) - - // Valid: mid-range value - let mid_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "mvlan-mid".parse().unwrap(), - description: "Group with mid-range MVLAN".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(2048).unwrap()), - }; + let group_name = "default-pool-test-group"; + let group_name2 = "default-pool-test-group-2"; + let project_name = "default-pool-test-project"; + let instance_name = "default-pool-test-instance"; - let mid_group: MulticastGroup = - object_create(client, &group_url, &mid_params).await; - wait_for_group_active(client, "mvlan-mid").await; - assert_eq!( - mid_group.mvlan, - Some(VlanID::new(2048).unwrap()), - "MVLAN 2048 should be valid" - ); - object_delete(client, &mcast_group_url("mvlan-mid")).await; - wait_for_group_deleted(client, "mvlan-mid").await; - - // Valid: maximum value (4094) - let max_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "mvlan-max".parse().unwrap(), - description: "Group with 
maximum MVLAN".to_string(), - }, - multicast_ip: None, - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(4094).unwrap()), - }; + // Setup: project and default IP pool in parallel (but no multicast pool yet) + let (_, _) = ops::join2( + create_project(&client, project_name), + create_default_ip_pool(&client), + ) + .await; + create_instance(client, project_name, instance_name).await; - let max_group: MulticastGroup = - object_create(client, &group_url, &max_params).await; - wait_for_group_active(client, "mvlan-max").await; - assert_eq!( - max_group.mvlan, - Some(VlanID::new(4094).unwrap()), - "MVLAN 4094 should be valid" + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" ); - object_delete(client, &mcast_group_url("mvlan-max")).await; - wait_for_group_deleted(client, "mvlan-max").await; - - // Invalid: reserved value 0 (rejected by Dendrite) - let invalid_params0 = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "mvlan-invalid-0".parse().unwrap(), - description: "Group with invalid MVLAN 0".to_string(), - }, - multicast_ip: None, + + // Case: Joining when no multicast pool exists - should fail with 400 (no pool available) + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(0).unwrap()), }; - object_create_error( client, - &group_url, - &invalid_params0, + &member_add_url, + &member_params, StatusCode::BAD_REQUEST, ) .await; - // Invalid: reserved value 1 (rejected by Dendrite) - let invalid_params1 = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "mvlan-invalid-1".parse().unwrap(), - description: "Group with invalid MVLAN 1".to_string(), - }, - multicast_ip: None, + // Create a default multicast pool + let mcast_pool = + 
create_multicast_ip_pool(&client, "default-mcast-pool").await; + + // Case: Joining when multicast pool exists - should succeed (pool auto-discovered) + let member_add_url2 = format!( + "/v1/multicast-groups/{group_name2}/members?project={project_name}" + ); + let member_params2 = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(1).unwrap()), }; - - object_create_error( + object_create::<_, MulticastGroupMember>( client, - &group_url, - &invalid_params1, - StatusCode::BAD_REQUEST, + &member_add_url2, + &member_params2, ) .await; - // Test invalid MVLAN at API boundary using raw JSON. - // The deserializer rejects invalid values at the HTTP boundary before they - // reach the business logic layer. - - // Invalid: raw JSON with mvlan = 0 (should get 400 Bad Request) - let raw_json0 = serde_json::json!({ - "identity": { - "name": "mvlan-raw-0", - "description": "Test raw JSON with mvlan 0" - }, - "mvlan": 0, - "pool": mcast_pool.identity.name - }); - - NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&raw_json0)) - .expect_status(Some(StatusCode::BAD_REQUEST)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Expected 400 Bad Request for raw JSON mvlan=0"); - - // Invalid: raw JSON with mvlan = 1 (should get 400 Bad Request) - let raw_json1 = serde_json::json!({ - "identity": { - "name": "mvlan-raw-1", - "description": "Test raw JSON with mvlan 1" - }, - "mvlan": 1, - "pool": mcast_pool.identity.name - }); + // Verify group was allocated from default pool + let group: MulticastGroup = + object_get(client, &mcast_group_url(group_name2)).await; + assert_eq!(group.ip_pool_id, mcast_pool.identity.id); - NexusRequest::new( - RequestBuilder::new(client, http::Method::POST, &group_url) - .body(Some(&raw_json1)) - .expect_status(Some(StatusCode::BAD_REQUEST)), - 
) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Expected 400 Bad Request for raw JSON mvlan=1"); + // Cleanup + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; + wait_for_group_deleted(client, group_name2).await; } -/// Database round-trip tests for MVLAN values -/// Verifies that VlanID <-> i16 conversion works correctly for all valid values +/// Test pool range allocation for multicast groups. +/// +/// Verifies that multicast IPs are correctly allocated from the specified +/// pool's ranges, and that exhausted ranges are handled properly. #[nexus_test] -async fn test_mvlan_database_round_trip(cptestctx: &ControlPlaneTestContext) { +async fn test_pool_range_allocation(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let project_name = "mvlan-roundtrip-project"; + let project_name = "pool-range-test-project"; - // Setup - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-roundtrip-pool", - (224, 53, 0, 10), - (224, 53, 0, 255), + // Create project and IP pools in parallel + // Multicast pool has small range (3 IPs: 224.10.0.1-224.10.0.3) + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "small-range-pool", + (224, 10, 0, 1), + (224, 10, 0, 3), + ), ) .await; - let group_url = "/v1/multicast-groups".to_string(); - - // Test cases: (group_name, mvlan_value) - let test_cases = vec![ - ("mvlan-none", None), - ("mvlan-2", Some(VlanID::new(2).unwrap())), - ("mvlan-100", Some(VlanID::new(100).unwrap())), - ("mvlan-4094", Some(VlanID::new(4094).unwrap())), - ]; + // Create 4 instances for testing + let instance_names = + ["range-inst-1", "range-inst-2", "range-inst-3", "range-inst-4"]; + for name in &instance_names { + create_instance(client, project_name, name).await; + } - for (group_name, mvlan) in 
&test_cases { - // Create group with specified mvlan - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: format!("Testing mvlan={mvlan:?}"), - }, - multicast_ip: None, + // Case: Create 3 groups - should succeed (uses all IPs in range) + for i in 0..3 { + let group_name = format!("range-group-{i}"); + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_names[i].parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: *mvlan, }; - - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - wait_for_group_active(client, group_name).await; - - // Verify the created group has the correct mvlan - assert_eq!( - created_group.mvlan, *mvlan, - "Created group should have mvlan={:?}", - mvlan - ); - - // Fetch the group back from the database and verify it matches - let fetched_group = get_multicast_group(client, group_name).await; - assert_eq!( - fetched_group.mvlan, *mvlan, - "Fetched group should have mvlan={:?}", - mvlan - ); - assert_eq!( - fetched_group.identity.id, created_group.identity.id, - "Fetched group ID should match created group ID" - ); - - // Clean up - object_delete(client, &mcast_group_url(group_name)).await; - wait_for_group_deleted(client, group_name).await; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; } -} - -#[nexus_test] -async fn test_multicast_group_mvlan_with_member_operations( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - let project_name = "mvlan-member-project"; - let group_name = "mvlan-member-group"; - let instance_name = "mvlan-test-instance"; - // Setup - create_default_ip_pool(&client).await; - create_project(&client, project_name).await; - let 
mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-member-pool", - (224, 60, 0, 10), - (224, 60, 0, 50), + // Case: Try to create 4th group - should fail (range exhausted) + let group_name4 = "range-group-3"; + let member_add_url4 = format!( + "/v1/multicast-groups/{group_name4}/members?project={project_name}" + ); + let member_params4 = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_names[3].parse().unwrap()), + source_ips: None, + }; + let error = object_create_error( + client, + &member_add_url4, + &member_params4, + StatusCode::INSUFFICIENT_STORAGE, // or appropriate error code ) .await; + assert!( + error.message.contains("IP") + || error.message.contains("exhausted") + || error.message.contains("available"), + "Error should mention IP exhaustion: {}", + error.message + ); - let group_url = "/v1/multicast-groups".to_string(); + // Case: Delete one group (by removing all members) + cleanup_instances(cptestctx, client, project_name, &[instance_names[0]]) + .await; + wait_for_group_deleted(client, "range-group-0").await; - // Create group with mvlan - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for testing mvlan with members".to_string(), - }, - multicast_ip: None, + // Case: Create new group - should succeed (IP reclaimed) + let group_name_new = "range-group-new"; + let member_add_url_new = format!( + "/v1/multicast-groups/{group_name_new}/members?project={project_name}" + ); + let member_params_new = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_names[3].parse().unwrap()), source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(2048).unwrap()), // Set MVLAN }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url_new, + &member_params_new, + ) + .await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; 
- wait_for_group_active(client, group_name).await; - - assert_eq!(created_group.mvlan, Some(VlanID::new(2048).unwrap())); - - // Create and start instance - let instance = instance_for_multicast_groups( + // Cleanup + cleanup_instances( cptestctx, + client, project_name, - instance_name, - true, // start the instance - &[], // no groups at creation + &[instance_names[1], instance_names[2], instance_names[3]], ) .await; +} - // Attach instance to group with mvlan - multicast_group_attach(cptestctx, project_name, instance_name, group_name) - .await; +/// Test that groups are allocated from the auto-discovered pool. +/// +/// Pool selection is automatic - when multiple pools exist, the first one +/// alphabetically is used (after preferring any default pool). +#[nexus_test] +async fn test_automatic_pool_selection(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let project_name = "pool-selection-test-project"; + let instance_name = "pool-selection-instance"; - // Wait for member to reach Joined state - wait_for_member_state( - cptestctx, - group_name, - instance.identity.id, - nexus_db_model::MulticastGroupMemberState::Joined, + // Setup: project and default IP pool in parallel + let (_, _) = ops::join2( + create_project(&client, project_name), + create_default_ip_pool(&client), ) .await; + create_instance(client, project_name, instance_name).await; - // Verify DPD shows vlan_id=2048 - let dpd_client = dpd_client(cptestctx); - let dpd_group = dpd_client - .multicast_group_get(&created_group.multicast_ip) - .await - .expect("Multicast group should exist in DPD"); - - match dpd_group.into_inner() { - dpd_types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { - assert_eq!( - external_forwarding.vlan_id, - Some(2048), - "DPD should show vlan_id matching group mvlan" - ); - } - dpd_types::MulticastGroupResponse::Underlay { .. 
} => { - panic!("Expected external group, got underlay"); - } - } + // Create a multicast pool (after instance, to test auto-discovery) + let mcast_pool = create_multicast_ip_pool_with_range( + &client, + "mcast-pool", + (224, 20, 0, 1), + (224, 20, 0, 10), + ) + .await; - // Clean up: stop instance before deleting - let instance_stop_url = - format!("/v1/instances/{instance_name}/stop?project={project_name}"); - NexusRequest::new( - RequestBuilder::new(client, Method::POST, &instance_stop_url) - .body(None as Option<&serde_json::Value>) - .expect_status(Some(StatusCode::ACCEPTED)), + // Case: Join group - pool is auto-discovered + let group_name = "auto-pool-group"; + let member_add_url = format!( + "/v1/multicast-groups/{group_name}/members?project={project_name}" + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Should stop instance"); + .await; - let nexus = &cptestctx.server.server_context().nexus; - let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); - instance_simulate(nexus, &instance_id).await; - instance_wait_for_state(client, instance_id, InstanceState::Stopped).await; + let group_view: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + // Pool is auto-discovered from available multicast pools + assert_eq!(group_view.ip_pool_id, mcast_pool.identity.id); + // Verify IP is in pool's range (224.20.0.x) + if let IpAddr::V4(ip) = group_view.multicast_ip { + assert_eq!(ip.octets()[0], 224); + assert_eq!(ip.octets()[1], 20); + } else { + panic!("Expected IPv4 multicast address"); + } - let instance_url = - format!("/v1/instances/{instance_name}?project={project_name}"); - object_delete(client, &instance_url).await; - object_delete(client, 
&mcast_group_url(group_name)).await; + // Cleanup + cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; wait_for_group_deleted(client, group_name).await; } +/// Test validation errors for pool exhaustion. #[nexus_test] -async fn test_multicast_group_mvlan_reconciler_update( - cptestctx: &ControlPlaneTestContext, -) { +async fn test_pool_exhaustion(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let project_name = "mvlan-reconciler-project"; - let group_name = "mvlan-reconciler-group"; - let instance_name = "mvlan-reconciler-instance"; + let project_name = "pool-exhaustion-test-project"; - // Setup - create_default_ip_pool(&client).await; - create_project(&client, project_name).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mvlan-reconciler-pool", - (224, 70, 0, 10), - (224, 70, 0, 50), + // Create project and IP pools in parallel (multicast pool has single IP) + let (_, _, _) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "empty-pool", + (224, 99, 0, 1), + (224, 99, 0, 1), // Single IP + ), ) .await; - let group_url = "/v1/multicast-groups".to_string(); - - // Create group with initial mvlan=2000 - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: String::from(group_name).parse().unwrap(), - description: "Group for testing reconciler mvlan updates" - .to_string(), - }, - multicast_ip: None, + // Use the single IP + let instance_name = "pool-exhaust-instance"; + create_instance(client, project_name, instance_name).await; + let group_exhaust = "exhaust-empty-pool"; + let member_add_exhaust = format!( + "/v1/multicast-groups/{group_exhaust}/members?project={project_name}" + ); + let member_params_exhaust = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_name.parse().unwrap()), source_ips: None, - pool: 
Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(2000).unwrap()), }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_exhaust, + &member_params_exhaust, + ) + .await; - let created_group: MulticastGroup = - object_create(client, &group_url, ¶ms).await; - wait_for_group_active(client, group_name).await; + // Now try to create another group - should fail + let instance2_name = "pool-exhaust-instance-2"; + create_instance(client, project_name, instance2_name).await; + let group_fail = "fail-empty-pool"; + let member_add_fail = format!( + "/v1/multicast-groups/{group_fail}/members?project={project_name}" + ); + let member_params_fail = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance2_name.parse().unwrap()), + source_ips: None, + }; + object_create_error( + client, + &member_add_fail, + &member_params_fail, + StatusCode::INSUFFICIENT_STORAGE, + ) + .await; - // Create and start instance, attach to group - let instance = instance_for_multicast_groups( + // Cleanup + cleanup_instances( cptestctx, + client, project_name, - instance_name, - true, // start the instance - &[], + &[instance_name, instance2_name], ) .await; +} - multicast_group_attach(cptestctx, project_name, instance_name, group_name) - .await; - wait_for_member_state( - cptestctx, - group_name, - instance.identity.id, - nexus_db_model::MulticastGroupMemberState::Joined, +/// Test multiple instances joining different SSM groups from the same SSM pool. 
+/// +/// Verifies: +/// - Pool allocates unique multicast IPs to each SSM group +/// - Different SSM groups can coexist with different source IP requirements +/// - Proper isolation between SSM groups on the same pool +#[nexus_test] +async fn test_multiple_ssm_groups_same_pool( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let project_name = "multiple-ssm-test"; + + // Create project and IP pools in parallel + let (_, _, ssm_pool) = ops::join3( + create_project(&client, project_name), + create_default_ip_pool(&client), + create_multicast_ip_pool_with_range( + &client, + "ssm-shared-pool", + (232, 50, 0, 10), + (232, 50, 0, 50), + ), ) .await; - // Verify initial mvlan in DPD - let dpd_client = dpd_client(cptestctx); - let initial_dpd_group = dpd_client - .multicast_group_get(&created_group.multicast_ip) - .await - .expect("Group should exist in DPD"); + // Create 3 instances + let instance_names = ["ssm-inst-1", "ssm-inst-2", "ssm-inst-3"]; + for name in &instance_names { + create_instance(client, project_name, name).await; + } + + // Each instance joins a different SSM group with different sources + let group_configs = [ + ("ssm-group-1", "10.1.1.1"), + ("ssm-group-2", "10.2.2.2"), + ("ssm-group-3", "10.3.3.3"), + ]; + + // Create all 3 SSM groups from the same pool + for (i, (group_name, source_ip)) in group_configs.iter().enumerate() { + let member_add_url = format!( + "/v1/multicast-groups/{}/members?project={}", + group_name, project_name + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance_names[i].parse().unwrap()), + source_ips: Some(vec![source_ip.parse().unwrap()]), + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Wait for group to become active + wait_for_group_active(client, group_name).await; + } + + // Verify all groups exist with correct properties + let mut allocated_ips = Vec::new(); + for (group_name, 
expected_source) in &group_configs { + let group: MulticastGroup = + object_get(client, &mcast_group_url(group_name)).await; + + // Verify pool reference + assert_eq!( + group.ip_pool_id, ssm_pool.identity.id, + "Group {} should reference the shared SSM pool", + group_name + ); - match initial_dpd_group.into_inner() { - dpd_types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { + // Verify SSM range (232.x.x.x) + if let IpAddr::V4(ip) = group.multicast_ip { assert_eq!( - external_forwarding.vlan_id, - Some(2000), - "DPD should show initial vlan_id=2000" + ip.octets()[0], + 232, + "Group {} should have IP in SSM range (232.x.x.x)", + group_name ); + assert_eq!( + ip.octets()[1], + 50, + "Group {} should have IP from pool range (232.50.x.x)", + group_name + ); + } else { + panic!("Expected IPv4 multicast address for group {}", group_name); } - dpd_types::MulticastGroupResponse::Underlay { .. } => { - panic!("Expected external group"); - } - } - // Update mvlan to 3500 while member is active - let update_mvlan = MulticastGroupUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: None, - }, - source_ips: None, - mvlan: Some(Nullable(Some(VlanID::new(3500).unwrap()))), // Update to 3500 - }; + // Verify source IPs + assert_eq!( + group.source_ips.len(), + 1, + "Group {} should have exactly 1 source IP", + group_name + ); + assert_eq!( + group.source_ips[0].to_string(), + *expected_source, + "Group {} should have correct source IP", + group_name + ); - let updated_group: MulticastGroup = - object_put(client, &mcast_group_url(group_name), &update_mvlan).await; + // Collect allocated IP for uniqueness check + allocated_ips.push(group.multicast_ip); + } + + // Verify all allocated IPs are unique + let unique_ips: std::collections::HashSet<_> = + allocated_ips.iter().collect(); assert_eq!( - updated_group.mvlan, - Some(VlanID::new(3500).unwrap()), - "Group mvlan should be updated" + unique_ips.len(), + allocated_ips.len(), + 
"All SSM groups should have unique multicast IPs from the pool" ); - // Wait for reconciler to process the mvlan change and verify DPD state - wait_for_group_dpd_update( - cptestctx, - &created_group.multicast_ip, - dpd_predicates::expect_vlan_id(3500), - "vlan_id = Some(3500)", - ) - .await; + // Verify we can list all members for each group + for (group_name, _) in &group_configs { + let members = list_multicast_group_members(&client, group_name).await; + assert_eq!( + members.len(), + 1, + "Group {} should have exactly 1 member", + group_name + ); + } - // Member should still be Joined after mvlan update - let members = list_multicast_group_members(client, group_name).await; - assert_eq!(members.len(), 1); - assert_eq!( - members[0].state, "Joined", - "Member should remain Joined after mvlan update" - ); + // Test that instances cannot join groups with different source IPs + let instance4_name = "ssm-inst-4"; + create_instance(client, project_name, instance4_name).await; - // Clean up: stop instance before deleting - let instance_stop_url = - format!("/v1/instances/{instance_name}/stop?project={project_name}"); - NexusRequest::new( - RequestBuilder::new(client, Method::POST, &instance_stop_url) - .body(None as Option<&serde_json::Value>) - .expect_status(Some(StatusCode::ACCEPTED)), + let member_add_url_wrong_source = format!( + "/v1/multicast-groups/ssm-group-1/members?project={}", + project_name + ); + let member_params_wrong_source = MulticastGroupMemberAdd { + instance: NameOrId::Name(instance4_name.parse().unwrap()), + source_ips: Some(vec!["10.99.99.99".parse().unwrap()]), // Different from group's 10.1.1.1 + }; + let error = object_create_error( + client, + &member_add_url_wrong_source, + &member_params_wrong_source, + StatusCode::BAD_REQUEST, ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .expect("Should stop instance"); - - let nexus = &cptestctx.server.server_context().nexus; - let instance_id = 
InstanceUuid::from_untyped_uuid(instance.identity.id); - instance_simulate(nexus, &instance_id).await; - instance_wait_for_state(client, instance_id, InstanceState::Stopped).await; + .await; + assert!( + error.message.contains("source"), + "Should reject instance joining SSM group with different source IPs: {}", + error.message + ); - let instance_url = - format!("/v1/instances/{instance_name}?project={project_name}"); - object_delete(client, &instance_url).await; - object_delete(client, &mcast_group_url(group_name)).await; - wait_for_group_deleted(client, group_name).await; -} + let all_instances: Vec<_> = instance_names + .iter() + .chain(std::iter::once(&instance4_name)) + .map(|s| *s) + .collect(); + cleanup_instances(cptestctx, client, project_name, &all_instances).await; -/// Assert that two multicast groups are equal in all fields. -fn assert_groups_eq(left: &MulticastGroup, right: &MulticastGroup) { - assert_eq!(left.identity.id, right.identity.id); - assert_eq!(left.identity.name, right.identity.name); - assert_eq!(left.identity.description, right.identity.description); - assert_eq!(left.multicast_ip, right.multicast_ip); - assert_eq!(left.source_ips, right.source_ips); - assert_eq!(left.mvlan, right.mvlan); - assert_eq!(left.ip_pool_id, right.ip_pool_id); + // Verify all groups are deleted + for (group_name, _) in &group_configs { + wait_for_group_deleted(client, group_name).await; + } } diff --git a/nexus/tests/integration_tests/multicast/instances.rs b/nexus/tests/integration_tests/multicast/instances.rs index 335a269e049..918738eb71e 100644 --- a/nexus/tests/integration_tests/multicast/instances.rs +++ b/nexus/tests/integration_tests/multicast/instances.rs @@ -6,12 +6,19 @@ //! Tests multicast group + instance integration. //! -//! Tests that verify multicast group functionality when integrated with -//! instance creation, modification, and deletion. - -use std::net::{IpAddr, Ipv4Addr}; +//! Instance lifecycle tests: +//! +//! 
- Full lifecycle: Create, attach, start, stop, delete flows +//! - Attach conflicts: Cannot attach same instance twice to same group +//! - Attach limits: Validates per-instance multicast group limits +//! - State transitions: Member states change with instance state +//! - Persistence: Memberships survive instance stop/start cycles +//! - Concurrent operations: Parallel attach/detach operations +//! - Never-started instances: Cleanup of members for instances never started +//! - Migration: Memberships update correctly when instance migrates use http::{Method, StatusCode}; + use nexus_db_queries::context::OpContext; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::{ @@ -20,8 +27,7 @@ use nexus_test_utils::resource_helpers::{ }; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ - InstanceCreate, InstanceNetworkInterfaceAttachment, MulticastGroupCreate, - MulticastGroupMemberAdd, + InstanceCreate, InstanceNetworkInterfaceAttachment, MulticastGroupMemberAdd, }; use nexus_types::external_api::views::{MulticastGroup, MulticastGroupMember}; use nexus_types::internal_api::params::InstanceMigrateRequest; @@ -30,7 +36,6 @@ use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, Instance, InstanceCpuCount, InstanceState, NameOrId, }; -use omicron_common::vlan::VlanID; use omicron_nexus::TestInterfaces; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use sled_agent_client::TestInterfaces as _; @@ -47,57 +52,36 @@ const PROJECT_NAME: &str = "test-project"; async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - // Setup - create IP pool and project (shared across all operations) - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool-comprehensive", - (224, 30, 0, 1), // Large 
range: 224.30.0.1 - (224, 30, 0, 255), // to 224.30.0.255 (255 IPs) + // Create project and pools in parallel + let (_, _, mcast_pool) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool-comprehensive", + (224, 30, 0, 1), // Large range: 224.30.0.1 + (224, 30, 0, 255), // to 224.30.0.255 (255 IPs) + ), ) .await; - // Create multiple multicast groups in parallel - let group_specs = &[ - MulticastGroupForTest { - name: "group-lifecycle-1", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 30, 0, 101)), - description: Some("Group for lifecycle testing 1".to_string()), - }, - MulticastGroupForTest { - name: "group-lifecycle-2", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 30, 0, 102)), - description: Some("Group for lifecycle testing 2".to_string()), - }, - MulticastGroupForTest { - name: "group-lifecycle-3", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 30, 0, 103)), - description: Some("Group for lifecycle testing 3".to_string()), - }, - MulticastGroupForTest { - name: "group-lifecycle-4", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 30, 0, 104)), - description: Some("Group for lifecycle testing 4".to_string()), - }, + // Group names for implicit groups (implicitly created when first member joins) + let group_names = [ + "group-lifecycle-1", + "group-lifecycle-2", + "group-lifecycle-3", + "group-lifecycle-4", ]; - let groups = - create_multicast_groups(client, &mcast_pool, group_specs).await; - - // Wait for all groups to become active in parallel - let group_names: Vec<&str> = group_specs.iter().map(|g| g.name).collect(); - wait_for_groups_active(client, &group_names).await; - - // Create multiple instances in parallel - test various attachment scenarios + // Create instances first (groups will be implicitly created when members attach) let instances = vec![ - // Instance with group attached at creation + // Instance for group-lifecycle-1 (will implicitly create the 
group) instance_for_multicast_groups( cptestctx, PROJECT_NAME, "instance-create-attach", false, - &["group-lifecycle-1"], + &[], ) .await, // Instances for live attach/detach testing @@ -128,7 +112,24 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { .await, ]; - // Verify create-time attachment worked + // Implicitly create group-lifecycle-1 by adding a member + let member_add_url = format!( + "/v1/multicast-groups/{}/members?project={PROJECT_NAME}", + group_names[0] + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("instance-create-attach".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Wait for group-lifecycle-1 to become active and verify membership + wait_for_group_active(client, group_names[0]).await; wait_for_member_state( cptestctx, "group-lifecycle-1", @@ -139,15 +140,19 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { .await; // Live attach/detach operations - // Attach instance-live-1 to group-lifecycle-2 - multicast_group_attach( + // Attach instance-live-1 to group-lifecycle-2 (implicitly creates the group) + multicast_group_attach_with_pool( cptestctx, PROJECT_NAME, "instance-live-1", "group-lifecycle-2", + Some(mcast_pool.identity.name.as_str()), ) .await; + // Wait for group-lifecycle-2 to become active + wait_for_group_active(client, group_names[1]).await; + // Attach instance-live-2 to group-lifecycle-2 (test multiple instances per group) multicast_group_attach( cptestctx, @@ -169,23 +174,32 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { } // Multi-group attachment (instance to multiple groups) - // Attach instance-multi-groups to multiple groups - multicast_group_attach( + // Attach instance-multi-groups to group-lifecycle-3 (implicitly creates the group) + multicast_group_attach_with_pool( cptestctx, PROJECT_NAME, 
"instance-multi-groups", "group-lifecycle-3", + Some(mcast_pool.identity.name.as_str()), ) .await; - multicast_group_attach( + // Wait for group-lifecycle-3 to become active + wait_for_group_active(client, group_names[2]).await; + + // Attach instance-multi-groups to group-lifecycle-4 (implicitly creates the group) + multicast_group_attach_with_pool( cptestctx, PROJECT_NAME, "instance-multi-groups", "group-lifecycle-4", + Some(mcast_pool.identity.name.as_str()), ) .await; + // Wait for group-lifecycle-4 to become active + wait_for_group_active(client, group_names[3]).await; + // Verify multi-group membership for group_name in ["group-lifecycle-3", "group-lifecycle-4"] { wait_for_member_state( @@ -207,7 +221,7 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { ) .await; - // Test idempotency - detach again (should not error) + // Test idempotency multicast_group_detach( client, PROJECT_NAME, @@ -239,7 +253,7 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { assert_eq!(members[0].instance_id, instances[2].identity.id); // Verify groups are still active and functional - for (i, group_name) in group_names.iter().enumerate() { + for group_name in group_names.iter() { let group_url = mcast_group_url(group_name); let current_group: MulticastGroup = object_get(client, &group_url).await; @@ -247,10 +261,8 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { current_group.state, "Active", "Group {group_name} should remain Active throughout lifecycle" ); - assert_eq!(current_group.identity.id, groups[i].identity.id); } - // Cleanup - use our parallel cleanup functions cleanup_instances( cptestctx, client, @@ -264,7 +276,14 @@ async fn test_multicast_lifecycle(cptestctx: &ControlPlaneTestContext) { ) .await; - cleanup_multicast_groups(client, &group_names).await; + // Implicit model: groups are implicitly deleted when last member (instance) is removed + ops::join4( + wait_for_group_deleted(client, 
group_names[0]), + wait_for_group_deleted(client, group_names[1]), + wait_for_group_deleted(client, group_names[2]), + wait_for_group_deleted(client, group_names[3]), + ) + .await; } #[nexus_test] @@ -273,52 +292,66 @@ async fn test_multicast_group_attach_conflicts( ) { let client = &cptestctx.external_client; - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "mcast-pool-conflicts", - (224, 23, 0, 1), // Unique range: 224.23.0.1 - (224, 23, 0, 255), // to 224.23.0.255 + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool_with_range( + &client, + "mcast-pool-conflicts", + (224, 23, 0, 1), // Unique range: 224.23.0.1 + (224, 23, 0, 255), // to 224.23.0.255 + ), ) .await; - // Create a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 23, 0, 103)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "mcast-group-1".parse().unwrap(), - description: "Group for conflict testing".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - object_create::<_, MulticastGroup>(client, &group_url, ¶ms).await; - - // Wait for group to become Active before proceeding - wait_for_group_active(client, "mcast-group-1").await; - - // Create first instance with the multicast group + // Create first instance (implicit model: first instance creates the group) instance_for_multicast_groups( cptestctx, PROJECT_NAME, "mcast-instance-1", false, - &["mcast-group-1"], + &[], ) .await; - // Create second instance with the same multicast group + // Add instance1 to group (group implicitly creates if it doesn't exist) + let member_add_url = format!( + 
"{}?project={PROJECT_NAME}", + mcast_group_members_url("mcast-group-1") + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("mcast-instance-1".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, + ) + .await; + + // Wait for group to become Active before proceeding + wait_for_group_active(client, "mcast-group-1").await; + + // Create second instance and add to same multicast group // This should succeed (multicast groups can have multiple members, unlike floating IPs) instance_for_multicast_groups( cptestctx, PROJECT_NAME, "mcast-instance-2", false, - &["mcast-group-1"], + &[], + ) + .await; + let member2_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("mcast-instance-2".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member2_params, ) .await; @@ -345,7 +378,6 @@ async fn test_multicast_group_attach_conflicts( "Multicast group should support multiple members (unlike floating IPs)" ); - // Clean up - use cleanup functions cleanup_instances( cptestctx, client, @@ -353,7 +385,7 @@ async fn test_multicast_group_attach_conflicts( &["mcast-instance-1", "mcast-instance-2"], ) .await; - cleanup_multicast_groups(client, &["mcast-group-1"]).await; + wait_for_group_deleted(client, "mcast-group-1").await; } #[nexus_test] @@ -362,61 +394,52 @@ async fn test_multicast_group_attach_limits( ) { let client = &cptestctx.external_client; - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool(&client, "mcast-pool").await; + // Create project and pools in parallel + let (_, _, mcast_pool) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; - // Create multiple multicast groups in parallel to test per-instance limits - 
let group_specs = &[ - MulticastGroupForTest { - name: "limit-test-group-0", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 104)), - description: Some("Group 0 for limit testing".to_string()), - }, - MulticastGroupForTest { - name: "limit-test-group-1", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 105)), - description: Some("Group 1 for limit testing".to_string()), - }, - MulticastGroupForTest { - name: "limit-test-group-2", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 106)), - description: Some("Group 2 for limit testing".to_string()), - }, - MulticastGroupForTest { - name: "limit-test-group-3", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 107)), - description: Some("Group 3 for limit testing".to_string()), - }, - MulticastGroupForTest { - name: "limit-test-group-4", - multicast_ip: IpAddr::V4(Ipv4Addr::new(224, 0, 1, 108)), - description: Some("Group 4 for limit testing".to_string()), - }, + // Group names for implicit groups (implicitly created when first member joins) + let group_names = [ + "limit-test-group-0", + "limit-test-group-1", + "limit-test-group-2", + "limit-test-group-3", + "limit-test-group-4", ]; - create_multicast_groups(client, &mcast_pool, group_specs).await; - let group_names: Vec<&str> = group_specs.iter().map(|g| g.name).collect(); - - // Wait for all groups to become Active in parallel - wait_for_groups_active(client, &group_names).await; - - // Try to create an instance with many multicast groups - // (Check if there's a reasonable limit per instance) - let multicast_group_names: Vec<&str> = group_names[0..3].to_vec(); - + // Create instance first (groups will be implicitly created when attached) let instance = instance_for_multicast_groups( cptestctx, PROJECT_NAME, "mcast-instance-1", false, - &multicast_group_names, // Test with 3 groups (reasonable limit) + &[], // No groups at creation ) .await; + // Attach instance to 3 groups (implicitly creates each group) + let multicast_group_names = &group_names[0..3]; + 
for group_name in multicast_group_names { + multicast_group_attach_with_pool( + cptestctx, + PROJECT_NAME, + "mcast-instance-1", + group_name, + Some(mcast_pool.identity.name.as_str()), + ) + .await; + } + + // Wait for all groups to become active in parallel + wait_for_groups_active(client, multicast_group_names).await; + // Wait for members to reach "Left" state for each group - // (instance is stopped, so member starts in "Left" state with no `sled_id`) - for group_name in &multicast_group_names { + // (instance is stopped, so member starts in "Left" state with no sled_id) + for group_name in multicast_group_names { wait_for_member_state( cptestctx, group_name, @@ -427,7 +450,7 @@ async fn test_multicast_group_attach_limits( } // Verify instance is member of multiple groups - for group_name in &multicast_group_names { + for group_name in multicast_group_names { let members_url = mcast_group_members_url(group_name); let members = nexus_test_utils::http_testing::NexusRequest::iter_collection_authn::( client, @@ -447,10 +470,16 @@ async fn test_multicast_group_attach_limits( assert_eq!(members[0].instance_id, instance.identity.id); } - // Clean up - use cleanup functions cleanup_instances(cptestctx, client, PROJECT_NAME, &["mcast-instance-1"]) .await; - cleanup_multicast_groups(client, &group_names).await; + // Groups are implicitly deleted when last member (instance) is removed + // Only 3 groups were created (group_names[0..3]) + ops::join3( + wait_for_group_deleted(client, group_names[0]), + wait_for_group_deleted(client, group_names[1]), + wait_for_group_deleted(client, group_names[2]), + ) + .await; } #[nexus_test] @@ -459,39 +488,43 @@ async fn test_multicast_group_instance_state_transitions( ) { let client = &cptestctx.external_client; - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool(&client, "mcast-pool").await; - - // Create a multicast group with explicit IP for easy DPD 
validation - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 200)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "state-test-group".parse().unwrap(), - description: "Group for testing instance state transitions" - .to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - object_create::<_, MulticastGroup>(client, &group_url, ¶ms).await; - - // Wait for group to become Active before proceeding - wait_for_group_active(client, "state-test-group").await; + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; - // Create stopped instance and add to multicast group + // Create stopped instance (no multicast groups at creation) let stopped_instance = instance_for_multicast_groups( cptestctx, PROJECT_NAME, "state-test-instance", - false, // Create stopped - &["state-test-group"], + false, + &[], + ) + .await; + + // Add instance to group (group implicitly creates if it doesn't exist) + let member_add_url = format!( + "{}?project={PROJECT_NAME}", + mcast_group_members_url("state-test-group") + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("state-test-instance".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, ) .await; + // Wait for group to become Active before proceeding + wait_for_group_active(client, "state-test-group").await; + // Verify instance is stopped and in multicast group assert_eq!(stopped_instance.runtime.run_state, InstanceState::Stopped); @@ -566,13 +599,13 @@ async fn test_multicast_group_instance_state_transitions( ); assert_eq!(final_members[0].instance_id, 
stopped_instance.identity.id); - // Clean up object_delete( client, &format!("/v1/instances/state-test-instance?project={PROJECT_NAME}"), ) .await; - object_delete(client, &mcast_group_url("state-test-group")).await; + + wait_for_group_deleted(client, "state-test-group").await; } /// Test that multicast group membership persists through instance stop/start cycles @@ -581,45 +614,54 @@ async fn test_multicast_group_instance_state_transitions( async fn test_multicast_group_persistence_through_stop_start( cptestctx: &ControlPlaneTestContext, ) { - let client = &cptestctx.external_client; - - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool(&client, "mcast-pool").await; + // Ensure inventory and DPD are ready before creating instances with multicast groups + ensure_multicast_test_ready(cptestctx).await; - // Create a multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 0, 1, 200)); - let group_url = "/v1/multicast-groups".to_string(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "persist-test-group".parse().unwrap(), - description: "Group for stop/start persistence testing".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - object_create::<_, MulticastGroup>(client, &group_url, ¶ms).await; + let client = &cptestctx.external_client; - // Wait for group to become Active - wait_for_group_active(client, "persist-test-group").await; + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(&client, "mcast-pool"), + ) + .await; - // Create instance with the multicast group and start it + // Create instance and start it (no multicast groups at creation) let instance = instance_for_multicast_groups( cptestctx, 
PROJECT_NAME, "persist-test-instance", - true, // start the instance - &["persist-test-group"], + true, + &[], + ) + .await; + + // Add instance to group (group implicitly creates if it doesn't exist) + let member_add_url = format!( + "{}?project={PROJECT_NAME}", + mcast_group_members_url("persist-test-group") + ); + let member_params = MulticastGroupMemberAdd { + instance: NameOrId::Name("persist-test-instance".parse().unwrap()), + source_ips: None, + }; + object_create::<_, MulticastGroupMember>( + client, + &member_add_url, + &member_params, ) .await; + // Wait for group to become Active + wait_for_group_active(client, "persist-test-group").await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Simulate the instance transitioning to Running state let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance_id).await; + instance_wait_for_state(client, instance_id, InstanceState::Running).await; // Wait for member to be joined (reconciler will process the sled_id set by instance start) wait_for_member_state( @@ -748,7 +790,6 @@ async fn test_multicast_group_persistence_through_stop_start( ) .await; - // Clean up - use cleanup helper which handles stop/delete cleanup_instances( cptestctx, client, @@ -756,7 +797,8 @@ async fn test_multicast_group_persistence_through_stop_start( &["persist-test-instance"], ) .await; - cleanup_multicast_groups(client, &["persist-test-group"]).await; + // Group is implicitly deleted when last member (instance) is removed + wait_for_group_deleted(client, "persist-test-group").await; } /// Verify concurrent multicast operations maintain correct member states. 
@@ -769,33 +811,24 @@ async fn test_multicast_group_persistence_through_stop_start( async fn test_multicast_concurrent_operations( cptestctx: &ControlPlaneTestContext, ) { + // Ensure inventory and DPD are ready before creating instances with multicast groups + ensure_multicast_test_ready(cptestctx).await; + let client = &cptestctx.external_client; - create_default_ip_pool(&client).await; - create_project(client, PROJECT_NAME).await; - let mcast_pool = create_multicast_ip_pool_with_range( - &client, - "concurrent-pool", - (224, 40, 0, 1), - (224, 40, 0, 255), + // Create project and pools in parallel + let (_, _, mcast_pool) = ops::join3( + create_default_ip_pool(&client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool_with_range( + &client, + "concurrent-pool", + (224, 40, 0, 1), + (224, 40, 0, 255), + ), ) .await; - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 40, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: "concurrent-test-group".parse().unwrap(), - description: "Group for concurrent operations testing".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, "concurrent-test-group").await; - // Create multiple instances for concurrent testing let instance_names = [ "concurrent-instance-1", @@ -804,17 +837,28 @@ async fn test_multicast_concurrent_operations( "concurrent-instance-4", ]; - // Create all instances in parallel (now that we fixed the cleanup double-delete bug) + // Create all instances in parallel let create_futures = instance_names .iter() .map(|name| create_instance(client, PROJECT_NAME, name)); let instances = ops::join_all(create_futures).await; - // Attach all instances to the multicast group in 
parallel (this is the optimization) + // First instance attach with pool (implicitly creates the group) + multicast_group_attach_with_pool( + cptestctx, + PROJECT_NAME, + instance_names[0], + "concurrent-test-group", + Some(mcast_pool.identity.name.as_str()), + ) + .await; + wait_for_group_active(client, "concurrent-test-group").await; + + // Attach remaining instances to the existing group in parallel multicast_group_attach_bulk( cptestctx, PROJECT_NAME, - &instance_names, + &instance_names[1..], "concurrent-test-group", ) .await; @@ -886,7 +930,7 @@ async fn test_multicast_concurrent_operations( "concurrent-test-group", ) .await; - // Don't wait for reconciler - immediately do another operation + // Don't wait for reconciler; immediately do another operation multicast_group_detach( client, PROJECT_NAME, @@ -916,9 +960,9 @@ async fn test_multicast_concurrent_operations( .await; } - // Cleanup + // Cleanup and delete instances (group is implicitly deleted when last member removed) cleanup_instances(cptestctx, client, PROJECT_NAME, &instance_names).await; - cleanup_multicast_groups(client, &["concurrent-test-group"]).await; + wait_for_group_deleted(client, "concurrent-test-group").await; } /// Verify that multicast members are properly cleaned up when an instance @@ -937,35 +981,20 @@ async fn test_multicast_member_cleanup_instance_never_started( let group_name = "never-started-group"; let instance_name = "never-started-instance"; - // Setup: project, pools, group - create_project(client, project_name).await; - create_default_ip_pool(client).await; - let mcast_pool = create_multicast_ip_pool_with_range( - client, - "never-started-pool", - (224, 50, 0, 1), - (224, 50, 0, 255), + // Create project and pools in parallel + let (_, _, _) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "never-started-pool", + (224, 50, 0, 1), + (224, 50, 0, 255), + ), ) .await; - // Create multicast 
group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 50, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for never-started instance test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - - // Create instance but don't start it - use start: false + // Create instance but don't start it let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { name: instance_name.parse().unwrap(), @@ -982,7 +1011,7 @@ async fn test_multicast_member_cleanup_instance_never_started( disks: vec![], boot_disk: None, cpu_platform: None, - start: false, // Critical: don't start the instance + start: false, // Don't start the instance auto_restart_policy: Default::default(), anti_affinity_groups: Vec::new(), }; @@ -991,14 +1020,15 @@ async fn test_multicast_member_cleanup_instance_never_started( let instance: Instance = object_create(client, &instance_url, &instance_params).await; - // Add instance as multicast member (will be in "Left" state since instance - // is stopped with no sled_id) + // Add instance as multicast member (implicitly creates group) + // Member will be in "Left" state since instance is stopped with no sled_id let member_add_url = format!( "{}?project={project_name}", mcast_group_members_url(group_name) ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( @@ -1007,6 +1037,7 @@ async fn test_multicast_member_cleanup_instance_never_started( &member_params, ) .await; + wait_for_group_active(client, group_name).await; // Wait for member 
to reach "Left" state (stopped instance with no sled_id) wait_for_member_state( @@ -1021,38 +1052,18 @@ async fn test_multicast_member_cleanup_instance_never_started( let members = list_multicast_group_members(client, group_name).await; assert_eq!(members.len(), 1, "Should have one member"); - // Delete the instance directly without starting it - // This simulates the case where an instance is created, added to multicast group, - // but then deleted before ever starting (never gets a sled assignment) - let instance_url = - format!("/v1/instances/{instance_name}?project={project_name}"); - object_delete(client, &instance_url).await; - - // Wait for reconciler to process the deletion - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // Critical test: Verify the orphaned member was cleaned up - // The RPW reconciler should detect that the member's instance was deleted - // and remove the member from the group - let final_members = list_multicast_group_members(client, group_name).await; - assert_eq!( - final_members.len(), - 0, - "Orphaned member should be cleaned up when instance is deleted without starting" - ); - - // Verify that stale ports were removed from DPD - // Since the instance never started (never had a `sled_id`), there should be - // no rear/underlay ports in DPD for this group. This verifies the reconciler - // only removes ports when it has complete information about all "Joined" members. 
- - // Get the underlay group IP from the database + // Save underlay group info BEFORE deleting the instance + // (After deletion, the group will be deleted too since it was implicitly created) let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.clone(), datastore.clone()); - // Fetch the external group to get its underlay_group_id + // Fetch the external group from the view to get its multicast_ip + let external_group_view = get_multicast_group(client, group_name).await; + let multicast_ip = external_group_view.multicast_ip; + + // Fetch the external group from datastore to get its underlay_group_id let external_group = datastore .multicast_group_lookup_by_ip(&opctx, multicast_ip) .await @@ -1070,42 +1081,24 @@ async fn test_multicast_member_cleanup_instance_never_started( let underlay_multicast_ip = underlay_group.multicast_ip.ip(); - // Query DPD for the underlay group (where instance members are stored) - let dpd_client = nexus_test_utils::dpd_client(cptestctx); - let dpd_group_response = dpd_client - .multicast_group_get(&underlay_multicast_ip) - .await - .expect("Should be able to query DPD for underlay multicast group"); - - // Extract underlay members from the response - let underlay_members = match dpd_group_response.into_inner() { - dpd_client::types::MulticastGroupResponse::Underlay { - members, .. - } => members, - dpd_client::types::MulticastGroupResponse::External { .. 
} => { - panic!( - "Expected underlay group when querying underlay IP, got external" - ); - } - }; - - // Filter to only rear/underlay members (instance members on backplane) - let rear_underlay_members: Vec<_> = underlay_members - .iter() - .filter(|m| { - matches!(m.port_id, dpd_client::types::PortId::Rear(_)) - && m.direction == dpd_client::types::Direction::Underlay - }) - .collect(); + // Delete the instance directly without starting it + // This simulates the case where an instance is created, added to multicast group, + // but then deleted before ever starting (never gets a sled assignment) + let instance_url = + format!("/v1/instances/{instance_name}?project={project_name}"); + object_delete(client, &instance_url).await; - assert_eq!( - rear_underlay_members.len(), - 0, - "DPD should have no rear/underlay ports after instance deletion and reconciler run" - ); + // Verify the orphaned member was cleaned up + // The RPW reconciler should detect that the member's instance was deleted + // and remove the member from the group. Since this was an implicitly created + // group and the last member was removed, the group itself should be deleted. + wait_for_group_deleted(client, group_name).await; - // Cleanup - cleanup_multicast_groups(client, &[group_name]).await; + // Verify that stale ports were removed from DPD + // Since the instance never started (never had a `sled_id`), there should be + // no rear/underlay ports in DPD for this group. + // Note: We use the underlay IP we saved before deleting the instance. + wait_for_group_deleted_from_dpd(cptestctx, underlay_multicast_ip).await; } /// Verify multicast group membership persists through instance migration. 
@@ -1119,6 +1112,9 @@ async fn test_multicast_member_cleanup_instance_never_started( async fn test_multicast_group_membership_during_migration( cptestctx: &ControlPlaneTestContext, ) { + // Ensure inventory and DPD are ready before creating instances with multicast groups + ensure_multicast_test_ready(cptestctx).await; + let client = &cptestctx.external_client; let lockstep_client = &cptestctx.lockstep_client; let nexus = &cptestctx.server.server_context().nexus; @@ -1126,51 +1122,43 @@ async fn test_multicast_group_membership_during_migration( let group_name = "migration-test-group"; let instance_name = "migration-test-instance"; - // Setup: project, pools, and multicast group - create_project(client, project_name).await; - create_default_ip_pool(client).await; - let mcast_pool = create_multicast_ip_pool_with_range( - client, - "migration-pool", - (224, 60, 0, 1), - (224, 60, 0, 255), + // Create project and pools in parallel + let (_, _, mcast_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "migration-pool", + (224, 60, 0, 1), + (224, 60, 0, 255), + ), ) .await; - // Create multicast group with mvlan - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 60, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for migration testing with mvlan".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: Some(VlanID::new(3000).unwrap()), // Test mvlan persistence through migration - }; - - let created_group: MulticastGroup = - object_create(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - - // Verify mvlan is set - assert_eq!( - created_group.mvlan, - Some(VlanID::new(3000).unwrap()), - 
"MVLAN should be set on group creation" - ); - - // Create and start instance with multicast group membership + // Create and start instance first (no multicast groups at creation) let instance = instance_for_multicast_groups( cptestctx, project_name, instance_name, - true, // start the instance - &[group_name], + true, + &[], + ) + .await; + + // Add instance to group (group implicitly creates if it doesn't exist) + multicast_group_attach_with_pool( + cptestctx, + project_name, + instance_name, + group_name, + Some(mcast_pool.identity.name.as_str()), ) .await; + wait_for_group_active(client, group_name).await; + + // Get the group's multicast IP for DPD verification later + let created_group = get_multicast_group(client, group_name).await; + let multicast_ip = created_group.multicast_ip; let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); @@ -1193,29 +1181,13 @@ async fn test_multicast_group_membership_during_migration( assert_eq!(pre_migration_members[0].instance_id, instance.identity.id); assert_eq!(pre_migration_members[0].state, "Joined"); - // Verify mvlan is in DPD before migration + // Verify group exists in DPD before migration let dpd_client = nexus_test_utils::dpd_client(cptestctx); - let pre_migration_dpd_group = dpd_client + dpd_client .multicast_group_get(&multicast_ip) .await .expect("Multicast group should exist in DPD before migration"); - match pre_migration_dpd_group.into_inner() { - dpd_client::types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { - assert_eq!( - external_forwarding.vlan_id, - Some(3000), - "DPD should show vlan_id=3000 before migration" - ); - } - dpd_client::types::MulticastGroupResponse::Underlay { .. 
} => { - panic!("Expected external group, got underlay"); - } - } - // Get source and target sleds for migration let source_sled_id = nexus .active_instance_info(&instance_id, None) @@ -1246,7 +1218,7 @@ async fn test_multicast_group_membership_during_migration( .await .expect("Should initiate instance migration"); - // Get propolis IDs for source and target - follow the pattern from existing tests + // Get propolis IDs for source and target let info = nexus .active_instance_info(&instance_id, None) .await @@ -1342,31 +1314,15 @@ async fn test_multicast_group_membership_during_migration( // This confirms the RPW reconciler correctly mapped the new sled to its rear port verify_inventory_based_port_mapping(cptestctx, &instance_id) .await - .expect("port mapping should be updated after migration"); + .expect("Port mapping should be updated after migration"); - // Verify mvlan persisted in DPD after migration - let post_migration_dpd_group = dpd_client + // Verify group still exists in DPD after migration + dpd_client .multicast_group_get(&multicast_ip) .await .expect("Multicast group should exist in DPD after migration"); - match post_migration_dpd_group.into_inner() { - dpd_client::types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { - assert_eq!( - external_forwarding.vlan_id, - Some(3000), - "DPD should still show vlan_id=3000 after migration - mvlan must persist" - ); - } - dpd_client::types::MulticastGroupResponse::Underlay { .. 
} => { - panic!("Expected external group, got underlay"); - } - } - - // Cleanup: Stop and delete instance, then cleanup group + // Cleanup: Stop and delete instance let stop_url = format!("/v1/instances/{instance_name}/stop?project={project_name}"); nexus_test_utils::http_testing::NexusRequest::new( @@ -1387,14 +1343,15 @@ async fn test_multicast_group_membership_during_migration( instance_simulate(nexus, &instance_id).await; instance_wait_for_state(client, instance_id, InstanceState::Stopped).await; - // Delete instance and cleanup + // Delete instance; group is implicitly deleted when last member removed object_delete( client, &format!("/v1/instances/{instance_name}?project={project_name}"), ) .await; - cleanup_multicast_groups(client, &[group_name]).await; + // Implicit model: group is implicitly deleted when last member (instance) is removed + wait_for_group_deleted(client, group_name).await; } /// Verify the RPW reconciler handles concurrent instance migrations within the same multicast group. 
@@ -1413,48 +1370,46 @@ async fn test_multicast_group_concurrent_member_migrations( let project_name = "concurrent-migration-project"; let group_name = "concurrent-migration-group"; - // Setup: project, pools, and multicast group - create_project(client, project_name).await; - create_default_ip_pool(client).await; - let mcast_pool = create_multicast_ip_pool_with_range( - client, - "concurrent-migration-pool", - (224, 62, 0, 1), - (224, 62, 0, 255), + // Create project and pools in parallel + let (_, _, mcast_pool) = ops::join3( + create_project(client, project_name), + create_default_ip_pool(client), + create_multicast_ip_pool_with_range( + client, + "concurrent-migration-pool", + (224, 62, 0, 1), + (224, 62, 0, 255), + ), ) .await; - // Create multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 62, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for concurrent migration testing".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - - // Ensure inventory and DPD are ready before creating instances with multicast groups + // Ensure inventory and DPD are ready before creating instances ensure_multicast_test_ready(cptestctx).await; - // Create multiple instances all in the same multicast group - let instance_specs = [ - ("concurrent-instance-1", &[group_name][..]), - ("concurrent-instance-2", &[group_name][..]), - ]; + // Create multiple instances + let instance_names = ["concurrent-instance-1", "concurrent-instance-2"]; + let create_futures = instance_names + .iter() + .map(|name| create_instance(client, project_name, name)); + let instances = 
ops::join_all(create_futures).await; - let instances = create_instances_with_multicast_groups( - client, + // First instance attach with pool (implicitly creates the group) + multicast_group_attach_with_pool( + cptestctx, + project_name, + instance_names[0], + group_name, + Some(mcast_pool.identity.name.as_str()), + ) + .await; + wait_for_group_active(client, group_name).await; + + // Second instance attach (group already exists) + multicast_group_attach( + cptestctx, project_name, - &instance_specs, - true, // start instances + instance_names[1], + group_name, ) .await; @@ -1625,8 +1580,7 @@ async fn test_multicast_group_concurrent_member_migrations( .await; } - // Cleanup - let instance_names = ["concurrent-instance-1", "concurrent-instance-2"]; + // Cleanup and delete instances (group is automatically deleted when last member removed) cleanup_instances(cptestctx, client, project_name, &instance_names).await; - cleanup_multicast_groups(client, &[group_name]).await; + wait_for_group_deleted(client, group_name).await; } diff --git a/nexus/tests/integration_tests/multicast/mod.rs b/nexus/tests/integration_tests/multicast/mod.rs index fe818dd60e6..ed4c06c46fb 100644 --- a/nexus/tests/integration_tests/multicast/mod.rs +++ b/nexus/tests/integration_tests/multicast/mod.rs @@ -2,10 +2,19 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Multicast integration tests and helper methods. +//! Multicast integration tests and shared helper methods. +//! +//! This module provides common test infrastructure: +//! +//! - URL builders: `mcast_group_url`, `mcast_group_members_url`, etc. +//! - IP pool setup: `create_multicast_ip_pool`, `create_multicast_ip_pool_with_range` +//! - Reconciler control: `wait_for_multicast_reconciler`, `activate_multicast_reconciler` +//! - State waiters: `wait_for_group_active`, `wait_for_member_state`, etc. +//! 
- DPD verification: `verify_inventory_based_port_mapping`, `wait_for_group_deleted_from_dpd` +//! - Instance helpers: `instance_for_multicast_groups`, `cleanup_instances` +//! - Attach/detach: `multicast_group_attach`, `multicast_group_detach` use std::future::Future; -use std::net::IpAddr; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; @@ -22,8 +31,8 @@ use nexus_test_utils::resource_helpers::{ }; use nexus_types::deployment::SledFilter; use nexus_types::external_api::params::{ - InstanceCreate, InstanceNetworkInterfaceAttachment, IpPoolCreate, - MulticastGroupCreate, + InstanceCreate, InstanceMulticastGroupJoin, + InstanceNetworkInterfaceAttachment, IpPoolCreate, MulticastGroupIdentifier, }; use nexus_types::external_api::shared::{IpRange, Ipv4Range}; use nexus_types::external_api::views::{ @@ -32,7 +41,7 @@ use nexus_types::external_api::views::{ use nexus_types::identity::{Asset, Resource}; use omicron_common::api::external::{ ByteCount, Hostname, IdentityMetadataCreateParams, Instance, - InstanceAutoRestartPolicy, InstanceCpuCount, InstanceState, NameOrId, + InstanceCpuCount, InstanceState, NameOrId, }; use omicron_nexus::TestInterfaces; use omicron_test_utils::dev::poll::{self, CondCheckError, wait_for_condition}; @@ -57,7 +66,7 @@ mod networking_integration; const POLL_INTERVAL: Duration = Duration::from_millis(80); const MULTICAST_OPERATION_TIMEOUT: Duration = Duration::from_secs(120); -/// Build URL for listing all multicast groups (fleet-scoped). +/// Build URL for listing multicast groups. pub(crate) fn mcast_groups_url() -> String { "/v1/multicast-groups".to_string() } @@ -75,7 +84,7 @@ pub(crate) fn mcast_group_members_url(group_name: &str) -> String { /// Build URL for adding a member to a multicast group. /// /// The `?project=` parameter is required when using instance names (for scoping) -/// but must NOT be provided when using instance UUIDs (causes 400 Bad Request). 
+/// but must not be provided when using instance UUIDs (causes 400 Bad Request). pub(crate) fn mcast_group_member_add_url( group_name: &str, instance: &NameOrId, @@ -88,15 +97,11 @@ pub(crate) fn mcast_group_member_add_url( } } -/// Test helper for creating multicast groups in batch operations. -#[derive(Clone)] -pub(crate) struct MulticastGroupForTest { - pub name: &'static str, - pub multicast_ip: IpAddr, - pub description: Option, -} - /// Create a multicast IP pool for ASM (Any-Source Multicast) testing. +/// +/// Uses range 224.2.0.0 - 224.2.255.255 which avoids all reserved addresses: +/// - 224.0.0.0/24 (link-local) +/// - 224.0.1.1 (NTP), 224.0.1.39/40 (Cisco Auto-RP), 224.0.1.129-132 (PTP) pub(crate) async fn create_multicast_ip_pool( client: &ClientTestContext, pool_name: &str, @@ -104,8 +109,8 @@ pub(crate) async fn create_multicast_ip_pool( create_multicast_ip_pool_with_range( client, pool_name, - (224, 0, 1, 10), // Default ASM range start - (224, 0, 1, 255), // Default ASM range end + (224, 2, 0, 0), // Default ASM range start + (224, 2, 255, 255), // Default ASM range end ) .await } @@ -168,6 +173,20 @@ pub(crate) async fn wait_for_multicast_reconciler( .await } +/// Activates the multicast reconciler and waits for it to complete. +/// +/// Use this when you need to explicitly trigger the reconciler (e.g., after +/// restarting DPD) rather than waiting for an already-triggered run. +pub(crate) async fn activate_multicast_reconciler( + lockstep_client: &ClientTestContext, +) -> nexus_lockstep_client::types::BackgroundTask { + nexus_test_utils::background::activate_background_task( + lockstep_client, + "multicast_reconciler", + ) + .await +} + /// Wait for a condition to be true, activating the reconciler periodically. 
/// /// This is like `wait_for_condition` but activates the multicast reconciler @@ -382,133 +401,6 @@ pub(crate) async fn ensure_dpd_ready(cptestctx: &ControlPlaneTestContext) { } } -/// Wait for DPD multicast group state to match a condition. -/// -/// Generic helper that polls DPD state and calls the provided predicate -/// to determine if the expected state has been reached. This is useful when -/// the reconciler runs sagas asynchronously and tests need to wait for DPD -/// to reflect the changes. -/// -/// # Usage Examples -/// -/// Check for a specific vlan_id: -/// ```rust,ignore -/// wait_for_dpd_state( -/// cptestctx, -/// &multicast_ip, -/// |response| match response { -/// MulticastGroupResponse::External { external_forwarding, .. } => { -/// if external_forwarding.vlan_id == Some(3500) { -/// Ok(()) -/// } else { -/// Err(CondCheckError::NotYet) -/// } -/// } -/// _ => Err(CondCheckError::Failed("Expected external group".to_string())) -/// }, -/// "vlan_id = Some(3500)", -/// ).await; -/// ``` -/// -/// Check for source IP changes: -/// ```rust,ignore -/// wait_for_dpd_state( -/// cptestctx, -/// &multicast_ip, -/// |response| match response { -/// MulticastGroupResponse::External { sources, .. 
} => { -/// if sources.contains(&expected_source) { -/// Ok(()) -/// } else { -/// Err(CondCheckError::NotYet) -/// } -/// } -/// _ => Err(CondCheckError::Failed("Expected external group".to_string())) -/// }, -/// "sources contains expected IP", -/// ).await; -/// ``` -pub(crate) async fn wait_for_dpd_state( - cptestctx: &ControlPlaneTestContext, - multicast_ip: &IpAddr, - predicate: F, - description: &str, -) where - F: Fn( - &dpd_client::types::MulticastGroupResponse, - ) -> Result<(), CondCheckError>, -{ - let dpd_client = nexus_test_utils::dpd_client(cptestctx); - - match wait_for_condition( - || async { - match dpd_client.multicast_group_get(multicast_ip).await { - Ok(response) => predicate(&response.into_inner()), - Err(e) => Err(CondCheckError::Failed(format!( - "DPD query failed: {e}" - ))), - } - }, - &POLL_INTERVAL, - &MULTICAST_OPERATION_TIMEOUT, - ) - .await - { - Ok(_) => {} - Err(poll::Error::TimedOut(elapsed)) => panic!( - "DPD state for {multicast_ip} did not reach expected condition '{description}' within {elapsed:?}" - ), - Err(poll::Error::PermanentError(err)) => { - panic!("Failed waiting for DPD state '{description}': {err}") - } - } -} - -/// Wait for a multicast group DPD update to complete. -/// -/// This is a composite helper that combines activating the reconciler -/// and waiting for DPD state to match a condition. Use this instead of -/// calling `wait_for_multicast_reconciler()` + `wait_for_dpd_state()` -/// separately. 
-/// -/// # Usage Examples -/// -/// After a metadata-only update (name/description): -/// ```rust,ignore -/// wait_for_group_dpd_update( -/// cptestctx, -/// &multicast_ip, -/// dpd_predicates::expect_external_group(), -/// "name update saga completed", -/// ).await; -/// ``` -/// -/// After an mvlan update: -/// ```rust,ignore -/// wait_for_group_dpd_update( -/// cptestctx, -/// &multicast_ip, -/// dpd_predicates::expect_vlan_id(3500), -/// "vlan_id updated to 3500", -/// ).await; -/// ``` -pub(crate) async fn wait_for_group_dpd_update( - cptestctx: &ControlPlaneTestContext, - multicast_ip: &IpAddr, - predicate: F, - description: &str, -) where - F: Fn( - &dpd_client::types::MulticastGroupResponse, - ) -> Result<(), CondCheckError>, -{ - // Activate reconciler to ensure saga is launched - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; - - // Wait for DPD to reflect the changes (saga completion) - wait_for_dpd_state(cptestctx, multicast_ip, predicate, description).await; -} - /// Get a single multicast group by name. pub(crate) async fn get_multicast_group( client: &ClientTestContext, @@ -809,7 +701,7 @@ pub(crate) async fn verify_inventory_based_port_mapping( // Get the multicast member for this instance to find its external_group_id let members = datastore - .multicast_group_members_list_by_instance(&opctx, *instance_uuid, false) + .multicast_group_members_list_by_instance(&opctx, *instance_uuid) .await .map_err(|e| format!("list members failed: {e}"))?; @@ -915,6 +807,9 @@ pub(crate) async fn verify_inventory_based_port_mapping( } /// Wait for a multicast group to have a specific number of members. +/// +/// Note: For expected_count=0 (last member removed), use `wait_for_group_deleted` +/// instead since the implicit deletion deletes the group when empty. 
pub(crate) async fn wait_for_member_count( client: &ClientTestContext, group_name: &str, @@ -989,6 +884,80 @@ pub(crate) async fn wait_for_group_deleted( } } +/// Verify a group is either deleted or in one of the expected states. +/// +/// Useful when DPD is unavailable and groups can't complete state transitions. +/// For example, when DPD is down during deletion, groups may be stuck in +/// "Creating" or "Deleting" state rather than being fully deleted. +pub(crate) async fn verify_group_deleted_or_in_states( + client: &ClientTestContext, + group_name: &str, + expected_states: &[&str], +) { + let groups_result = + nexus_test_utils::resource_helpers::objects_list_page_authz::< + MulticastGroup, + >(client, "/v1/multicast-groups") + .await; + + let matching_groups: Vec<_> = groups_result + .items + .into_iter() + .filter(|g| g.identity.name == group_name) + .collect(); + + if !matching_groups.is_empty() { + // Group still exists - should be in one of the expected states + let actual_state = &matching_groups[0].state; + assert!( + expected_states.contains(&actual_state.as_str()), + "Group {group_name} should be in one of {expected_states:?} states, found: \"{actual_state}\"" + ); + } + // If group is gone, that's also valid - operation completed +} + +/// Wait for a multicast group to be deleted from DPD (dataplane) with reconciler activation. +/// +/// This function waits for the DPD to report that the multicast group no longer exists +/// (returns 404), while periodically activating the reconciler to drive the cleanup process. 
+pub(crate) async fn wait_for_group_deleted_from_dpd( + cptestctx: &ControlPlaneTestContext, + multicast_ip: std::net::IpAddr, +) { + let lockstep_client = &cptestctx.lockstep_client; + let dpd_client = nexus_test_utils::dpd_client(cptestctx); + + match wait_for_condition_with_reconciler( + lockstep_client, + || async { + match dpd_client.multicast_group_get(&multicast_ip).await { + Ok(_) => { + // Group still exists in DPD - not yet deleted + Err(CondCheckError::<()>::NotYet) + } + Err(_) => Ok(()), // Group doesn't exist - deleted + } + }, + &POLL_INTERVAL, + &MULTICAST_OPERATION_TIMEOUT, + ) + .await + { + Ok(_) => {} + Err(poll::Error::TimedOut(elapsed)) => { + panic!( + "group with IP {multicast_ip} was not deleted from DPD within {elapsed:?}", + ); + } + Err(poll::Error::PermanentError(err)) => { + panic!( + "failed waiting for group with IP {multicast_ip} to be deleted from DPD: {err:?}", + ); + } + } +} + /// Create an instance with multicast groups. pub(crate) async fn instance_for_multicast_groups( cptestctx: &ControlPlaneTestContext, @@ -1005,9 +974,9 @@ pub(crate) async fn instance_for_multicast_groups( } let client = &cptestctx.external_client; - let multicast_groups: Vec = multicast_group_names + let multicast_groups: Vec<_> = multicast_group_names .iter() - .map(|name| NameOrId::Name(name.parse().unwrap())) + .map(|name| MulticastGroupIdentifier::Name(name.parse().unwrap())) .collect(); let url = format!("/v1/instances?project={project_name}"); @@ -1041,110 +1010,58 @@ pub(crate) async fn instance_for_multicast_groups( .await } -/// Create multiple instances with multicast groups attached at creation time. -pub(crate) async fn create_instances_with_multicast_groups( - client: &ClientTestContext, +/// Attach an instance to a multicast group. +/// +/// If the group doesn't exist and is referenced by name, it will be implicitly created +/// using the specified pool (required for implicit creation). 
+pub(crate) async fn multicast_group_attach( + cptestctx: &ControlPlaneTestContext, project_name: &str, - instance_specs: &[(&str, &[&str])], // (instance_name, group_names) - start: bool, -) -> Vec { - let create_futures = - instance_specs.iter().map(|(instance_name, group_names)| { - let url = format!("/v1/instances?project={project_name}"); - let multicast_groups: Vec = group_names - .iter() - .map(|name| NameOrId::Name(name.parse().unwrap())) - .collect(); - - async move { - object_create::<_, Instance>( - client, - &url, - &InstanceCreate { - identity: IdentityMetadataCreateParams { - name: instance_name.parse().unwrap(), - description: format!( - "multicast test instance {instance_name}" - ), - }, - ncpus: InstanceCpuCount::try_from(2).unwrap(), - memory: ByteCount::from_gibibytes_u32(4), - hostname: instance_name.parse().unwrap(), - user_data: b"#cloud-config".to_vec(), - ssh_public_keys: None, - network_interfaces: - InstanceNetworkInterfaceAttachment::Default, - external_ips: vec![], - disks: vec![], - boot_disk: None, - cpu_platform: None, - start, - auto_restart_policy: Some( - InstanceAutoRestartPolicy::Never, - ), - anti_affinity_groups: Vec::new(), - multicast_groups, - }, - ) - .await - } - }); - - ops::join_all(create_futures).await + instance_name: &str, + group_name: &str, +) { + multicast_group_attach_with_pool( + cptestctx, + project_name, + instance_name, + group_name, + None, + ) + .await } -/// Attach an instance to a multicast group. -pub(crate) async fn multicast_group_attach( +/// Attach an instance to a multicast group, specifying a pool for implicit creation. +/// +/// If the group doesn't exist and is referenced by name, it will be implicitly created +/// using the specified pool. 
+pub(crate) async fn multicast_group_attach_with_pool( cptestctx: &ControlPlaneTestContext, project_name: &str, instance_name: &str, group_name: &str, + _pool: Option<&str>, ) { let client = &cptestctx.external_client; let url = format!( "/v1/instances/{instance_name}/multicast-groups/{group_name}?project={project_name}" ); + let body = InstanceMulticastGroupJoin { source_ips: None }; + // Use PUT to attach instance to multicast group - NexusRequest::new( + let response = NexusRequest::new( RequestBuilder::new(client, Method::PUT, &url) + .body(Some(&body)) .expect_status(Some(StatusCode::CREATED)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .expect("Should attach instance to multicast group"); -} - -/// Create multiple multicast groups from the same pool. -pub(crate) async fn create_multicast_groups( - client: &ClientTestContext, - pool: &IpPool, - group_specs: &[MulticastGroupForTest], -) -> Vec { - let create_futures = group_specs.iter().map(|spec| { - let group_url = mcast_groups_url(); - let params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: spec.name.parse().unwrap(), - description: spec - .description - .clone() - .unwrap_or_else(|| format!("Test group {}", spec.name)), - }, - multicast_ip: Some(spec.multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(pool.identity.name.clone())), - mvlan: None, - }; - - async move { - object_create::<_, MulticastGroup>(client, &group_url, ¶ms) - .await - } - }); - ops::join_all(create_futures).await + response + .parsed_body::() + .expect("Should parse member"); } /// Wait for multiple groups to become "Active". @@ -1158,19 +1075,6 @@ pub(crate) async fn wait_for_groups_active( ops::join_all(wait_futures).await } -/// Clean up multiple groups. 
-pub(crate) async fn cleanup_multicast_groups( - client: &ClientTestContext, - group_names: &[&str], -) { - let delete_futures = group_names.iter().map(|name| { - let url = mcast_group_url(name); - async move { object_delete(client, &url).await } - }); - - ops::join_all(delete_futures).await; -} - /// Clean up multiple instances, handling various states properly. /// /// This function handles the complete instance lifecycle for cleanup: @@ -1275,7 +1179,6 @@ pub(crate) async fn stop_instances( ) { let nexus = &cptestctx.server.server_context().nexus; - // First, fetch all instances in parallel let fetch_futures = instance_names.iter().map(|name| { let url = format!("/v1/instances/{name}?project={project_name}"); async move { @@ -1459,54 +1362,3 @@ pub(crate) mod ops { tokio::join!(op1, op2, op3, op4) } } - -/// Common DPD state predicates for use with `wait_for_dpd_state()`. -/// -/// These predicates provide pre-built conditions for common DPD state checks. -pub(crate) mod dpd_predicates { - use super::*; - - /// Predicate that checks if a group exists in DPD as an external group. - /// - /// Used for metadata-only updates (name, description) where DPD state - /// doesn't change but we need to verify the saga completed without errors. - pub fn expect_external_group() -> impl Fn( - &dpd_client::types::MulticastGroupResponse, - ) - -> Result<(), CondCheckError> { - |response| match response { - dpd_client::types::MulticastGroupResponse::External { .. } => { - Ok(()) - } - dpd_client::types::MulticastGroupResponse::Underlay { .. } => Err( - CondCheckError::Failed("Expected external group".to_string()), - ), - } - } - - /// Predicate that checks if a group has a specific vlan_id in DPD. - /// - /// Used for mvlan updates where we need to verify the vlan_id was - /// applied to the dataplane. 
- pub fn expect_vlan_id( - vlan: u16, - ) -> impl Fn( - &dpd_client::types::MulticastGroupResponse, - ) -> Result<(), CondCheckError> { - move |response| match response { - dpd_client::types::MulticastGroupResponse::External { - external_forwarding, - .. - } => { - if external_forwarding.vlan_id == Some(vlan) { - Ok(()) - } else { - Err(CondCheckError::NotYet) - } - } - dpd_client::types::MulticastGroupResponse::Underlay { .. } => Err( - CondCheckError::Failed("Expected external group".to_string()), - ), - } - } -} diff --git a/nexus/tests/integration_tests/multicast/networking_integration.rs b/nexus/tests/integration_tests/multicast/networking_integration.rs index 1ed2b1138d7..6074fd5926a 100644 --- a/nexus/tests/integration_tests/multicast/networking_integration.rs +++ b/nexus/tests/integration_tests/multicast/networking_integration.rs @@ -8,8 +8,6 @@ //! when combined with other networking features like external IPs, floating IPs, //! and complex network configurations. -use std::net::{IpAddr, Ipv4Addr}; - use http::{Method, StatusCode}; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::resource_helpers::create_floating_ip; @@ -19,12 +17,9 @@ use nexus_test_utils::resource_helpers::{ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ EphemeralIpCreate, ExternalIpCreate, FloatingIpAttach, InstanceCreate, - InstanceNetworkInterfaceAttachment, MulticastGroupCreate, - MulticastGroupMemberAdd, -}; -use nexus_types::external_api::views::{ - FloatingIp, MulticastGroup, MulticastGroupMember, + InstanceNetworkInterfaceAttachment, MulticastGroupMemberAdd, }; +use nexus_types::external_api::views::{FloatingIp, MulticastGroupMember}; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, Instance, InstanceCpuCount, @@ -54,7 +49,7 @@ async fn test_multicast_with_external_ip_basic( let instance_name = "external-ip-mcast-instance"; // Setup: project and IP pools in parallel 
- let (_, _, mcast_pool) = ops::join3( + let (_, _, _) = ops::join3( create_project(client, project_name), create_default_ip_pool(client), // For external IPs create_multicast_ip_pool_with_range( @@ -66,23 +61,6 @@ async fn test_multicast_with_external_ip_basic( ) .await; - // Create multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 100, 0, 50)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for external IP integration test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - // Create instance (will start by default) let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { @@ -128,6 +106,7 @@ async fn test_multicast_with_external_ip_basic( ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( @@ -136,6 +115,7 @@ async fn test_multicast_with_external_ip_basic( &member_params, ) .await; + wait_for_group_active(client, group_name).await; // Wait for multicast member to reach "Joined" state wait_for_member_state( @@ -226,7 +206,8 @@ async fn test_multicast_with_external_ip_basic( // Cleanup cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; - cleanup_multicast_groups(client, &[group_name]).await; + // Implicit deletion model: group is implicitly deleted when last member (instance) is removed + wait_for_group_deleted(client, group_name).await; } /// Verify external IP allocation/deallocation lifecycle for multicast group members. 
@@ -246,7 +227,7 @@ async fn test_multicast_external_ip_lifecycle( let instance_name = "external-ip-lifecycle-instance"; // Setup in parallel - let (_, _, mcast_pool) = ops::join3( + let (_, _, _) = ops::join3( create_project(client, project_name), create_default_ip_pool(client), create_multicast_ip_pool_with_range( @@ -258,23 +239,7 @@ async fn test_multicast_external_ip_lifecycle( ) .await; - // Create multicast group and instance (similar to previous test) - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 101, 0, 75)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for external IP lifecycle test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - + // Create instance let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { name: instance_name.parse().unwrap(), @@ -312,12 +277,14 @@ async fn test_multicast_external_ip_lifecycle( ensure_multicast_test_ready(cptestctx).await; wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + // Add instance to multicast group let member_add_url = format!( "{}?project={project_name}", mcast_group_members_url(group_name) ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( @@ -326,7 +293,16 @@ async fn test_multicast_external_ip_lifecycle( &member_params, ) .await; - wait_for_multicast_reconciler(&cptestctx.lockstep_client).await; + wait_for_group_active(client, group_name).await; + + // Wait for member to transition from "Joining"->"Joined" + wait_for_member_state( + cptestctx, + 
group_name, + instance_id, + nexus_db_model::MulticastGroupMemberState::Joined, + ) + .await; // Verify initial multicast state let initial_members = @@ -409,12 +385,12 @@ async fn test_multicast_external_ip_lifecycle( ); } - // Cleanup cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; - cleanup_multicast_groups(client, &[group_name]).await; + wait_for_group_deleted(client, group_name).await; } -/// Verify instances can be created with both external IP and multicast group simultaneously. +/// Verify instances can be created with both external IP and multicast group +/// simultaneously. /// /// Instance creation with both features works without conflicts during initial setup, /// and both features are properly configured from creation. @@ -430,7 +406,7 @@ async fn test_multicast_with_external_ip_at_creation( let instance_name = "creation-mixed-instance"; // Setup - parallelize project and pool creation - let (_, _, mcast_pool) = ops::join3( + let (_, _, _) = ops::join3( create_project(client, project_name), create_default_ip_pool(client), create_multicast_ip_pool_with_range( @@ -442,23 +418,6 @@ async fn test_multicast_with_external_ip_at_creation( ) .await; - // Create multicast group first - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 102, 0, 100)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for creation test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, group_name).await; - // Create instance with external IP specified at creation let external_ip_param = ExternalIpCreate::Ephemeral { pool: None }; let instance_params = InstanceCreate { @@ -514,6 +473,7 @@ 
async fn test_multicast_with_external_ip_at_creation( ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( @@ -522,6 +482,7 @@ async fn test_multicast_with_external_ip_at_creation( &member_params, ) .await; + wait_for_group_active(client, group_name).await; // Verify both features work together - wait for member to reach Joined state wait_for_member_state( @@ -542,9 +503,8 @@ async fn test_multicast_with_external_ip_at_creation( "Instance should retain external IP" ); - // Cleanup cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; - cleanup_multicast_groups(client, &[group_name]).await; + wait_for_group_deleted(client, group_name).await; } /// Verify instances can have both floating IPs and multicast group membership. @@ -565,7 +525,7 @@ async fn test_multicast_with_floating_ip_basic( let floating_ip_name = "floating-ip-mcast-ip"; // Setup: project and IP pools - parallelize creation - let (_, _, mcast_pool) = ops::join3( + let (_, _, _) = ops::join3( create_project(client, project_name), create_default_ip_pool(client), // For floating IPs create_multicast_ip_pool_with_range( @@ -582,23 +542,6 @@ async fn test_multicast_with_floating_ip_basic( create_floating_ip(client, floating_ip_name, project_name, None, None) .await; - // Create multicast group - let multicast_ip = IpAddr::V4(Ipv4Addr::new(224, 200, 0, 50)); - let group_url = "/v1/multicast-groups".to_string(); - let group_params = MulticastGroupCreate { - identity: IdentityMetadataCreateParams { - name: group_name.parse().unwrap(), - description: "Group for floating IP integration test".to_string(), - }, - multicast_ip: Some(multicast_ip), - source_ips: None, - pool: Some(NameOrId::Name(mcast_pool.identity.name.clone())), - mvlan: None, - }; - - object_create::<_, MulticastGroup>(client, &group_url, &group_params).await; - wait_for_group_active(client, 
group_name).await; - // Create instance (will start by default) let instance_params = InstanceCreate { identity: IdentityMetadataCreateParams { @@ -644,6 +587,7 @@ async fn test_multicast_with_floating_ip_basic( ); let member_params = MulticastGroupMemberAdd { instance: NameOrId::Name(instance_name.parse().unwrap()), + source_ips: None, }; object_create::<_, MulticastGroupMember>( @@ -652,6 +596,7 @@ async fn test_multicast_with_floating_ip_basic( &member_params, ) .await; + wait_for_group_active(client, group_name).await; // Wait for multicast member to reach "Joined" state wait_for_member_state( @@ -669,7 +614,7 @@ async fn test_multicast_with_floating_ip_basic( // Verify that inventory-based mapping correctly mapped sled → switch port verify_inventory_based_port_mapping(cptestctx, &instance_uuid) .await - .expect("port mapping verification should succeed"); + .expect("Port mapping verification should succeed"); // Attach floating IP to the same instance let attach_url = format!( @@ -767,7 +712,6 @@ async fn test_multicast_with_floating_ip_basic( format!("/v1/floating-ips/{floating_ip_name}?project={project_name}"); object_delete(client, &fip_delete_url).await; - // Cleanup cleanup_instances(cptestctx, client, project_name, &[instance_name]).await; - cleanup_multicast_groups(client, &[group_name]).await; + wait_for_group_deleted(client, group_name).await; } diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 6b232ab8ecb..81037d9fcfa 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -380,11 +380,11 @@ static SETUP_REQUESTS: LazyLock> = LazyLock::new(|| { .unwrap(), id_routes: vec![], }, - // Create a multicast group in the Project + // Create a multicast group by adding a member (implicit group creation) SetupReq::Post { - url: &MULTICAST_GROUPS_URL, - body: serde_json::to_value(&*DEMO_MULTICAST_GROUP_CREATE).unwrap(), - id_routes: 
vec!["/v1/multicast-groups/{id}"], + url: &DEMO_MULTICAST_GROUP_MEMBERS_URL, + body: serde_json::to_value(&*DEMO_MULTICAST_MEMBER_ADD).unwrap(), + id_routes: vec![], }, // Create an affinity group in the Project SetupReq::Post { diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 9be5cbba1bb..fdc1b37639c 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -9,6 +9,12 @@ use crate::external_api::shared; use base64::Engine; use chrono::{DateTime, Utc}; use http::Uri; +use omicron_common::address::{ + IPV4_ADMIN_SCOPED_MULTICAST_SUBNET, IPV4_GLOP_MULTICAST_SUBNET, + IPV4_LINK_LOCAL_MULTICAST_SUBNET, IPV4_SPECIFIC_RESERVED_MULTICAST_ADDRS, + IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET, IPV6_LINK_LOCAL_MULTICAST_SUBNET, + IPV6_RESERVED_SCOPE_MULTICAST_SUBNET, MAX_SSM_SOURCE_IPS, +}; use omicron_common::api::external::{ AddressLotKind, AffinityPolicy, AllowedSourceIps, BfdMode, BgpPeer, ByteCount, FailureDomain, Hostname, IdentityMetadataCreateParams, @@ -17,7 +23,6 @@ use omicron_common::api::external::{ Nullable, PaginationOrder, RouteDestination, RouteTarget, UserId, }; use omicron_common::disk::DiskVariant; -use omicron_common::vlan::VlanID; use omicron_uuid_kinds::*; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use parse_display::Display; @@ -27,8 +32,7 @@ use serde::{ Deserialize, Deserializer, Serialize, Serializer, de::{self, Visitor}, }; -use std::collections::BTreeMap; -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::num::NonZeroU32; use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr}, @@ -39,11 +43,14 @@ use uuid::Uuid; macro_rules! 
path_param { ($struct:ident, $param:ident, $name:tt) => { + path_param!($struct, $param, $name, NameOrId, "Name or ID of the "); + }; + ($struct:ident, $param:ident, $name:tt, $type:ty, $doc_prefix:tt) => { #[derive(Serialize, Deserialize, JsonSchema)] pub struct $struct { - #[doc = "Name or ID of the "] + #[doc = $doc_prefix] #[doc = $name] - pub $param: NameOrId, + pub $param: $type, } }; } @@ -83,7 +90,13 @@ pub struct UninitializedSledId { path_param!(AffinityGroupPath, affinity_group, "affinity group"); path_param!(AntiAffinityGroupPath, anti_affinity_group, "anti affinity group"); -path_param!(MulticastGroupPath, multicast_group, "multicast group"); +path_param!( + MulticastGroupPath, + multicast_group, + "multicast group", + MulticastGroupIdentifier, + "Name, ID, or IP address of the " +); path_param!(ProjectPath, project, "project"); path_param!(InstancePath, instance, "instance"); path_param!(NetworkInterfacePath, interface, "network interface"); @@ -239,15 +252,8 @@ pub struct FloatingIpSelector { #[derive(Deserialize, JsonSchema, Clone)] pub struct MulticastGroupSelector { - /// Name or ID of the multicast group (fleet-scoped) - pub multicast_group: NameOrId, -} - -/// Path parameter for multicast group lookup by IP address. -#[derive(Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupIpLookupPath { - /// IP address of the multicast group - pub address: IpAddr, + /// Name, ID, or IP address of the multicast group (fleet-scoped) + pub multicast_group: MulticastGroupIdentifier, } #[derive(Deserialize, JsonSchema)] @@ -1270,9 +1276,10 @@ pub struct InstanceCreate { /// /// The instance will be automatically added as a member of the specified /// multicast groups during creation, enabling it to send and receive - /// multicast traffic for those groups. + /// multicast traffic for those groups. Groups can be specified by name, + /// ID, or multicast IP address. 
#[serde(default)] - pub multicast_groups: Vec, + pub multicast_groups: Vec, /// A list of disks to be attached to the instance. /// @@ -1394,11 +1401,12 @@ pub struct InstanceUpdate { /// When specified, this replaces the instance's current multicast group /// membership with the new set of groups. The instance will leave any /// groups not listed here and join any new groups that are specified. + /// Groups can be specified by name, ID, or multicast IP address. /// /// If not provided (None), the instance's multicast group membership /// will not be changed. #[serde(default)] - pub multicast_groups: Option>, + pub multicast_groups: Option>, } #[inline] @@ -2805,126 +2813,136 @@ pub struct AuditLog { pub end_time: Option>, } -/// Create-time parameters for a multicast group. +/// Parameters for adding an instance to a multicast group. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupCreate { - #[serde(flatten)] - pub identity: IdentityMetadataCreateParams, - /// The multicast IP address to allocate. If None, one will be allocated - /// from the default pool. - #[serde(default, deserialize_with = "validate_multicast_ip_param")] - pub multicast_ip: Option, - /// Source IP addresses for Source-Specific Multicast (SSM). +pub struct MulticastGroupMemberAdd { + /// Name or ID of the instance to add to the multicast group + pub instance: NameOrId, + /// Optional Source IP addresses for Source-Specific Multicast (SSM). + /// + /// If the group already exists: + /// - If `source_ips` is specified, validates they match the group's sources + /// (no implicit update is performed). /// - /// None uses default behavior (Any-Source Multicast). - /// Empty list explicitly allows any source (Any-Source Multicast). - /// Non-empty list restricts to specific sources (SSM). + /// If the group doesn't exist (implicit creation): + /// - If `source_ips` is specified and non-empty, attempts to create an SSM + /// group using these sources. 
+ /// - If omitted or empty, creates an ASM group. #[serde(default, deserialize_with = "validate_source_ips_param")] pub source_ips: Option>, - /// Name or ID of the IP pool to allocate from. If None, uses the default - /// multicast pool. - #[serde(default)] - pub pool: Option, - /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. - /// Tags packets leaving the rack to traverse VLAN-segmented upstream networks. - /// - /// Valid range: 2-4094 (VLAN IDs 0-1 are reserved by IEEE 802.1Q standard). - #[serde(default, deserialize_with = "validate_mvlan_option")] - pub mvlan: Option, } -/// Update-time parameters for a multicast group. +/// Path parameters for multicast group member operations. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupUpdate { - #[serde(flatten)] - pub identity: IdentityMetadataUpdateParams, - #[serde( - default, - deserialize_with = "validate_source_ips_param", - skip_serializing_if = "Option::is_none" - )] - pub source_ips: Option>, - /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. - /// Set to null to clear the MVLAN. Valid range: 2-4094 when provided. - /// Omit the field to leave mvlan unchanged. - #[serde( - default, - deserialize_with = "validate_mvlan_option_nullable", - skip_serializing_if = "Option::is_none" - )] - pub mvlan: Option>, +pub struct MulticastGroupMemberPath { + /// Name, ID, or IP address of the multicast group + pub multicast_group: MulticastGroupIdentifier, + /// Name or ID of the instance + pub instance: NameOrId, } -/// Parameters for adding an instance to a multicast group. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupMemberAdd { - /// Name or ID of the instance to add to the multicast group - pub instance: NameOrId, +/// Identifier for a multicast group: can be a Name, UUID, or IP address. 
+/// +/// This type supports the join-by-IP pattern where users can specify +/// a multicast IP address directly, and the system will auto-discover +/// the pool and find or create the group. +#[derive(Debug, Display, Clone, PartialEq)] +#[display("{0}")] +pub enum MulticastGroupIdentifier { + Id(Uuid), + Name(Name), + Ip(IpAddr), } -// MVLAN validators +impl TryFrom for MulticastGroupIdentifier { + type Error = String; -/// Dendrite requires VLAN IDs >= 2 (rejects 0 and 1) -/// -/// Valid range is 2-4094 -fn validate_mvlan(vlan_id: VlanID) -> Result { - let value: u16 = vlan_id.into(); - if value >= 2 { - Ok(vlan_id) - } else { - Err(format!( - "invalid mvlan: {value} (must be >= 2, VLAN IDs 0-1 are reserved)" - )) + fn try_from(value: String) -> Result { + if let Ok(id) = Uuid::parse_str(&value) { + Ok(MulticastGroupIdentifier::Id(id)) + } else if let Ok(ip) = value.parse::() { + Ok(MulticastGroupIdentifier::Ip(ip)) + } else { + Ok(MulticastGroupIdentifier::Name(Name::try_from(value)?)) + } } } -fn validate_mvlan_option<'de, D>( - deserializer: D, -) -> Result, D::Error> -where - D: serde::Deserializer<'de>, -{ - let opt = Option::::deserialize(deserializer)?; - match opt { - Some(v) => { - validate_mvlan(v).map(Some).map_err(serde::de::Error::custom) +impl FromStr for MulticastGroupIdentifier { + type Err = String; + + fn from_str(value: &str) -> Result { + MulticastGroupIdentifier::try_from(String::from(value)) + } +} + +impl From for MulticastGroupIdentifier { + fn from(name: Name) -> Self { + MulticastGroupIdentifier::Name(name) + } +} + +impl From for MulticastGroupIdentifier { + fn from(id: Uuid) -> Self { + MulticastGroupIdentifier::Id(id) + } +} + +impl From for MulticastGroupIdentifier { + fn from(ip: IpAddr) -> Self { + MulticastGroupIdentifier::Ip(ip) + } +} + +impl From for MulticastGroupIdentifier { + fn from(value: NameOrId) -> Self { + match value { + NameOrId::Name(name) => MulticastGroupIdentifier::Name(name), + NameOrId::Id(id) => 
MulticastGroupIdentifier::Id(id), } - None => Ok(None), } } -fn validate_mvlan_option_nullable<'de, D>( - deserializer: D, -) -> Result>, D::Error> -where - D: serde::Deserializer<'de>, -{ - // Deserialize as Nullable directly, which handles null properly - // When field has null value, Nullable deserializer returns Nullable(None) - // We always wrap in Some because if field is present, we got here - let nullable = Nullable::::deserialize(deserializer)?; - match nullable.0 { - Some(v) => validate_mvlan(v) - .map(|vv| Some(Nullable(Some(vv)))) - .map_err(serde::de::Error::custom), - None => Ok(Some(Nullable(None))), // Explicit null to clear +impl Serialize for MulticastGroupIdentifier { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) } } -/// Parameters for removing an instance from a multicast group. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupMemberRemove { - /// Name or ID of the instance to remove from the multicast group - pub instance: NameOrId, +impl<'de> Deserialize<'de> for MulticastGroupIdentifier { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + MulticastGroupIdentifier::try_from(s).map_err(de::Error::custom) + } } -/// Path parameters for multicast group member operations. 
-#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupMemberPath { - /// Name or ID of the multicast group - pub multicast_group: NameOrId, - /// Name or ID of the instance - pub instance: NameOrId, +impl JsonSchema for MulticastGroupIdentifier { + fn schema_name() -> String { + "MulticastGroupIdentifier".to_string() + } + + fn json_schema( + _generator: &mut schemars::r#gen::SchemaGenerator, + ) -> schemars::schema::Schema { + schemars::schema::SchemaObject { + instance_type: Some(schemars::schema::InstanceType::String.into()), + metadata: Some(Box::new(schemars::schema::Metadata { + title: Some("A multicast group identifier".to_string()), + description: Some( + "Can be a UUID, a name, or an IP address".to_string(), + ), + ..Default::default() + })), + ..Default::default() + } + .into() + } } /// Path parameters for instance multicast group operations. @@ -2932,8 +2950,26 @@ pub struct MulticastGroupMemberPath { pub struct InstanceMulticastGroupPath { /// Name or ID of the instance pub instance: NameOrId, - /// Name or ID of the multicast group - pub multicast_group: NameOrId, + /// Name, ID, or IP address of the multicast group + pub multicast_group: MulticastGroupIdentifier, +} + +/// Parameters for joining an instance to a multicast group. +/// +/// When joining by IP address, the pool containing the multicast IP is +/// auto-discovered from all linked multicast pools. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, Default)] +pub struct InstanceMulticastGroupJoin { + /// Optional Source IP addresses for Source-Specific Multicast (SSM). 
+ /// + /// If the group already exists: + /// - Validates sources match the group's existing configuration + /// + /// If the group doesn't exist (implicit creation): + /// - Non-empty list creates an SSM group with these sources + /// - Empty or omitted creates an ASM group + #[serde(default, deserialize_with = "validate_source_ips_param")] + pub source_ips: Option>, } /// Validate that an IP address is suitable for use as a SSM source. @@ -2993,79 +3029,77 @@ pub fn validate_multicast_ip(ip: IpAddr) -> Result<(), String> { } } -// IPv4 link-local multicast range reserved for local network control. -const RESERVED_IPV4_MULTICAST_LINK_LOCAL: Ipv4Addr = - Ipv4Addr::new(224, 0, 0, 0); -const RESERVED_IPV4_MULTICAST_LINK_LOCAL_PREFIX: u8 = 24; - /// Validates IPv4 multicast addresses. +/// +/// Checks that the address is multicast and not in a reserved range. These +/// checks are also enforced at IP pool creation time, but we validate here +/// too for better error messages at the API layer. 
fn validate_ipv4_multicast(addr: Ipv4Addr) -> Result<(), String> { - // Verify this is actually a multicast address if !addr.is_multicast() { - return Err(format!("{} is not a multicast address", addr)); + return Err(format!("{addr} is not a multicast address")); } - // Block link-local multicast (224.0.0.0/24) as it's reserved for local network control - let link_local = Ipv4Net::new( - RESERVED_IPV4_MULTICAST_LINK_LOCAL, - RESERVED_IPV4_MULTICAST_LINK_LOCAL_PREFIX, - ) - .unwrap(); - if link_local.contains(addr) { + // Check reserved subnets + if IPV4_LINK_LOCAL_MULTICAST_SUBNET.contains(addr) { return Err(format!( "{addr} is in the link-local multicast range (224.0.0.0/24)" )); } + if IPV4_GLOP_MULTICAST_SUBNET.contains(addr) { + return Err(format!( + "{addr} is in the GLOP multicast range (233.0.0.0/8)" + )); + } + if IPV4_ADMIN_SCOPED_MULTICAST_SUBNET.contains(addr) { + return Err(format!( + "{addr} is in the admin-scoped multicast range (239.0.0.0/8)" + )); + } + + // Check specific reserved addresses (NTP, Cisco Auto-RP, PTP) + if IPV4_SPECIFIC_RESERVED_MULTICAST_ADDRS.contains(&addr) { + return Err(format!( + "{addr} is a specifically reserved multicast address" + )); + } Ok(()) } /// Validates IPv6 multicast addresses. +/// +/// Checks that the address is multicast and not in a reserved range. These +/// checks are also enforced at IP pool creation time, but we validate here +/// too for better error messages at the API layer. 
fn validate_ipv6_multicast(addr: Ipv6Addr) -> Result<(), String> { if !addr.is_multicast() { return Err(format!("{addr} is not a multicast address")); } - // Define reserved IPv6 multicast subnets using oxnet - let reserved_subnets = [ - // Interface-local scope (ff01::/16) - Ipv6Net::new(Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 0), 16).unwrap(), - // Link-local scope (ff02::/16) - Ipv6Net::new(Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0), 16).unwrap(), - ]; - // Check reserved subnets - for subnet in &reserved_subnets { - if subnet.contains(addr) { - return Err(format!( - "{} is in the reserved multicast subnet {}", - addr, subnet - )); - } + if IPV6_RESERVED_SCOPE_MULTICAST_SUBNET.contains(addr) { + return Err(format!( + "{addr} is in the reserved-scope multicast range (ff00::/16)" + )); + } + if IPV6_INTERFACE_LOCAL_MULTICAST_SUBNET.contains(addr) { + return Err(format!( + "{addr} is in the interface-local multicast range (ff01::/16)" + )); + } + if IPV6_LINK_LOCAL_MULTICAST_SUBNET.contains(addr) { + return Err(format!( + "{addr} is in the link-local multicast range (ff02::/16)" + )); } - - // Note: Admin-local scope (ff04::/16) is allowed for on-premises deployments. - // Collision avoidance with underlay addresses is handled by the mapping - // function which sets a collision-avoidance bit in the underlay space. Ok(()) } -/// Deserializer for validating multicast IP addresses. -fn validate_multicast_ip_param<'de, D>( - deserializer: D, -) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - let ip_opt = Option::::deserialize(deserializer)?; - if let Some(ip) = ip_opt { - validate_multicast_ip(ip).map_err(|e| de::Error::custom(e))?; - } - Ok(ip_opt) -} - /// Deserializer for validating source IP addresses. +/// +/// This function validates each IP, deduplicates the list, and enforces the +/// maximum limit of [`MAX_SSM_SOURCE_IPS`] per RFC 3376. 
fn validate_source_ips_param<'de, D>( deserializer: D, ) -> Result>, D::Error> @@ -3073,12 +3107,34 @@ where D: Deserializer<'de>, { let ips_opt = Option::>::deserialize(deserializer)?; - if let Some(ref ips) = ips_opt { - for ip in ips { + if let Some(ips) = ips_opt { + // Validate each IP and deduplicate + let mut seen = HashSet::new(); + for ip in &ips { validate_source_ip(*ip).map_err(|e| de::Error::custom(e))?; + seen.insert(*ip); + } + + // Check max limit after deduplication + if seen.len() > MAX_SSM_SOURCE_IPS { + return Err(de::Error::custom(format!( + "too many source IPs: {} exceeds maximum of {MAX_SSM_SOURCE_IPS} per RFC 3376", + seen.len(), + ))); } + + // Return deduplicated list preserving original order + let mut deduped = Vec::with_capacity(seen.len()); + let mut added = HashSet::new(); + for ip in ips { + if added.insert(ip) { + deduped.push(ip); + } + } + Ok(Some(deduped)) + } else { + Ok(None) } - Ok(ips_opt) } const fn is_unicast_v4(ip: &Ipv4Addr) -> bool { @@ -3112,46 +3168,82 @@ mod tests { #[test] fn test_validate_multicast_ip_v4() { - // Valid IPv4 multicast addresses + // Valid IPv4 multicast addresses (ASM range) assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 1, 0, 1))) - .is_ok() + .is_ok(), + "224.1.0.1 should be valid ASM" ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(225, 2, 3, 4))) - .is_ok() + .is_ok(), + "225.2.3.4 should be valid ASM" ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(231, 5, 6, 7))) - .is_ok() + .is_ok(), + "231.5.6.7 should be valid ASM" ); + + // Invalid IPv4 multicast addresses - reserved ranges assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(233, 1, 1, 1))) - .is_ok() - ); // GLOP addressing - allowed + .is_err(), + "233.1.1.1 should be rejected (GLOP addressing)" + ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(239, 1, 1, 1))) - .is_ok() - ); // Admin-scoped - allowed - - // Invalid IPv4 multicast addresses - reserved ranges + .is_err(), + "239.1.1.1 
should be rejected (admin-scoped)" + ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 0, 1))) - .is_err() - ); // Link-local control + .is_err(), + "224.0.0.1 should be rejected (link-local control)" + ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 0, 255))) - .is_err() - ); // Link-local control + .is_err(), + "224.0.0.255 should be rejected (link-local control)" + ); + + // Specific reserved addresses (per IANA registry) + assert!( + validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 1))) + .is_err(), + "224.0.1.1 should be rejected (NTP)" + ); + assert!( + validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 39))) + .is_err(), + "224.0.1.39 should be rejected (Cisco Auto-RP-Announce)" + ); + assert!( + validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 40))) + .is_err(), + "224.0.1.40 should be rejected (Cisco Auto-RP-Discovery)" + ); + assert!( + validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 129))) + .is_err(), + "224.0.1.129 should be rejected (PTP-primary)" + ); + assert!( + validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(224, 0, 1, 130))) + .is_err(), + "224.0.1.130 should be rejected (PTP-alternate1)" + ); // Non-multicast addresses assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))) - .is_err() + .is_err(), + "192.168.1.1 should be rejected (unicast)" ); assert!( validate_multicast_ip(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))) - .is_err() + .is_err(), + "10.0.0.1 should be rejected (unicast)" ); } @@ -3162,49 +3254,63 @@ mod tests { validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff0e, 0, 0, 0, 0, 0, 0, 1 ))) - .is_ok() - ); // Global scope + .is_ok(), + "ff0e::1 should be valid (global scope)" + ); assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff0d, 0, 0, 0, 0, 0, 0, 1 ))) - .is_ok() - ); // Site-local scope + .is_ok(), + "ff0d::1 should be valid (site-local scope)" + ); assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff05, 0, 0, 0, 0, 0, 0, 1 
))) - .is_ok() - ); // Site-local admin scope - allowed + .is_ok(), + "ff05::1 should be valid (site-local admin scope)" + ); assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff08, 0, 0, 0, 0, 0, 0, 1 ))) - .is_ok() - ); // Org-local admin scope - allowed + .is_ok(), + "ff08::1 should be valid (org-local admin scope)" + ); // Invalid IPv6 multicast addresses - reserved ranges + assert!( + validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( + 0xff00, 0, 0, 0, 0, 0, 0, 1 + ))) + .is_err(), + "ff00::1 should be rejected (reserved scope)" + ); assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff01, 0, 0, 0, 0, 0, 0, 1 ))) - .is_err() - ); // Interface-local + .is_err(), + "ff01::1 should be rejected (interface-local)" + ); assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff02, 0, 0, 0, 0, 0, 0, 1 ))) - .is_err() - ); // Link-local + .is_err(), + "ff02::1 should be rejected (link-local)" + ); // Admin-local (ff04::/16) is allowed for on-premises deployments. - // Collision avoidance is handled by the mapping function which sets - // a collision-avoidance bit to separate external and underlay spaces. + // Collision avoidance is handled by the XOR folding mapping function + // which produces unique underlay addresses for each external address. 
assert!( validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0xff04, 0, 0, 0, 0, 0, 0, 1 ))) - .is_ok() + .is_ok(), + "ff04::1 should be valid (admin-local allowed)" ); // Non-multicast addresses @@ -3212,7 +3318,8 @@ mod tests { validate_multicast_ip(IpAddr::V6(Ipv6Addr::new( 0x2001, 0xdb8, 0, 0, 0, 0, 0, 1 ))) - .is_err() + .is_err(), + "2001:db8::1 should be rejected (unicast)" ); } @@ -3285,298 +3392,4 @@ mod tests { .is_err() ); // Loopback } - - #[test] - fn test_multicast_group_create_deserialization_with_all_fields() { - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": "224.1.2.3", - "source_ips": ["10.0.0.1", "10.0.0.2"], - "pool": "default", - "mvlan": 10 - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!(params.identity.name.as_str(), "test-group"); - assert_eq!( - params.multicast_ip, - Some(IpAddr::V4(Ipv4Addr::new(224, 1, 2, 3))) - ); - assert_eq!( - params.source_ips, - Some(vec![ - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)) - ]) - ); - } - - #[test] - fn test_multicast_group_create_deserialization_without_optional_fields() { - // This is the critical test - multicast_ip, source_ips, pool, and mvlan are all optional - let json = r#"{ - "name": "test-group", - "description": "Test multicast group" - }"#; - - let result: Result = - serde_json::from_str(json); - assert!( - result.is_ok(), - "Failed to deserialize without optional fields: {:?}", - result.err() - ); - let params = result.unwrap(); - assert_eq!(params.identity.name.as_str(), "test-group"); - assert_eq!(params.multicast_ip, None); - assert_eq!(params.source_ips, None); - assert_eq!(params.pool, None); - assert_eq!(params.mvlan, None); - } - - #[test] - fn test_multicast_group_create_deserialization_with_empty_source_ips() { - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": 
"224.1.2.3", - "source_ips": [] - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!(params.source_ips, Some(vec![])); - } - - #[test] - fn test_multicast_group_create_deserialization_invalid_multicast_ip() { - // Non-multicast IP should be rejected - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": "192.168.1.1" - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_err()); - } - - #[test] - fn test_multicast_group_create_deserialization_invalid_source_ip() { - // Multicast address in source_ips should be rejected - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": "224.1.2.3", - "source_ips": ["224.0.0.1"] - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_err()); - } - - #[test] - fn test_multicast_group_create_deserialization_only_multicast_ip() { - // Test with only multicast_ip, no source_ips - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": "224.1.2.3" - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!( - params.multicast_ip, - Some(IpAddr::V4(Ipv4Addr::new(224, 1, 2, 3))) - ); - assert_eq!(params.source_ips, None); - } - - #[test] - fn test_multicast_group_create_deserialization_only_source_ips() { - // Test with only source_ips, no multicast_ip (will be auto-allocated) - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "source_ips": ["10.0.0.1"] - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!(params.multicast_ip, None); - assert_eq!( - params.source_ips, - Some(vec![IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))]) - ); - } - - #[test] - fn 
test_multicast_group_create_deserialization_explicit_null_fields() { - // Test with explicit null values for optional fields - // This is what the CLI sends when fields are not provided - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": null, - "source_ips": null, - "pool": null, - "mvlan": null - }"#; - - let result: Result = - serde_json::from_str(json); - assert!( - result.is_ok(), - "Failed to deserialize with explicit null fields: {:?}", - result.err() - ); - let params = result.unwrap(); - assert_eq!(params.multicast_ip, None); - assert_eq!(params.source_ips, None); - assert_eq!(params.pool, None); - assert_eq!(params.mvlan, None); - } - - #[test] - fn test_multicast_group_create_deserialization_mixed_null_and_values() { - // Test with some nulls and some values - let json = r#"{ - "name": "test-group", - "description": "Test multicast group", - "multicast_ip": "224.1.2.3", - "source_ips": [], - "pool": null, - "mvlan": 30 - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!( - params.multicast_ip, - Some(IpAddr::V4(Ipv4Addr::new(224, 1, 2, 3))) - ); - assert_eq!(params.source_ips, Some(vec![])); - assert_eq!(params.pool, None); - assert_eq!(params.mvlan, Some(VlanID::new(30).unwrap())); - } - - #[test] - fn test_multicast_group_update_deserialization_omit_all_fields() { - // When fields are omitted, they should be None (no change) - let json = r#"{ - "name": "test-group" - }"#; - - let result: Result = - serde_json::from_str(json); - assert!( - result.is_ok(), - "Failed to deserialize update with omitted fields: {:?}", - result.err() - ); - let params = result.unwrap(); - assert_eq!(params.source_ips, None); - assert_eq!(params.mvlan, None); - } - - #[test] - fn test_multicast_group_update_deserialization_explicit_null_mvlan() { - // When mvlan is explicitly null, it should be Some(Nullable(None)) (clearing the field) - let json 
= r#"{ - "name": "test-group", - "mvlan": null - }"#; - - let result: Result = - serde_json::from_str(json); - assert!( - result.is_ok(), - "Failed to deserialize update with null mvlan: {:?}", - result.err() - ); - let params = result.unwrap(); - assert_eq!(params.mvlan, Some(Nullable(None))); - } - - #[test] - fn test_multicast_group_update_deserialization_set_mvlan() { - // When mvlan has a value, it should be Some(Nullable(Some(value))) - let json = r#"{ - "name": "test-group", - "mvlan": 100 - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!( - params.mvlan, - Some(Nullable(Some(VlanID::new(100).unwrap()))) - ); - } - - #[test] - fn test_multicast_group_update_deserialization_update_source_ips() { - // Test updating source_ips - let json = r#"{ - "name": "test-group", - "source_ips": ["10.0.0.5", "10.0.0.6"] - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!( - params.source_ips, - Some(vec![ - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)), - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 6)) - ]) - ); - } - - #[test] - fn test_multicast_group_update_deserialization_clear_source_ips() { - // Empty array should clear source_ips (Any-Source Multicast) - let json = r#"{ - "name": "test-group", - "source_ips": [] - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_ok()); - let params = result.unwrap(); - assert_eq!(params.source_ips, Some(vec![])); - } - - #[test] - fn test_multicast_group_update_deserialization_invalid_mvlan() { - // VLAN ID 1 should be rejected (reserved) - let json = r#"{ - "name": "test-group", - "mvlan": 1 - }"#; - - let result: Result = - serde_json::from_str(json); - assert!(result.is_err(), "Should reject reserved VLAN ID 1"); - } } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 7f91e98d286..4f8a3cdc8db 100644 --- 
a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -552,6 +552,7 @@ pub struct MulticastGroup { pub source_ips: Vec, /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. /// None means no VLAN tagging on egress. + // TODO(multicast): Remove mvlan field - being deprecated from multicast groups pub mvlan: Option, /// The ID of the IP pool this resource belongs to. pub ip_pool_id: Uuid, @@ -568,6 +569,8 @@ pub struct MulticastGroupMember { pub identity: IdentityMetadata, /// The ID of the multicast group this member belongs to. pub multicast_group_id: Uuid, + /// The multicast IP address of the group this member belongs to. + pub multicast_ip: IpAddr, /// The ID of the instance that is a member of this group. pub instance_id: Uuid, /// Current state of the multicast group membership. diff --git a/nexus/types/src/internal_api/background.rs b/nexus/types/src/internal_api/background.rs index 42264d0411b..c51f28317a8 100644 --- a/nexus/types/src/internal_api/background.rs +++ b/nexus/types/src/internal_api/background.rs @@ -161,8 +161,10 @@ pub struct MulticastGroupReconcilerStatus { /// Number of members processed ("Joining"→"Joined", "Left" with /// time_deleted→hard-deleted cleanup). pub members_processed: usize, - /// Number of members deleted (Left + time_deleted). + /// Number of members deleted ("Left" + time_deleted). pub members_deleted: usize, + /// Number of empty groups marked for deletion (implicit deletion). + pub empty_groups_marked: usize, /// Errors that occurred during reconciliation operations. 
pub errors: Vec, } diff --git a/nexus/types/src/lib.rs b/nexus/types/src/lib.rs index fd1f2243bfa..751be451fd2 100644 --- a/nexus/types/src/lib.rs +++ b/nexus/types/src/lib.rs @@ -36,5 +36,6 @@ pub mod fm; pub mod identity; pub mod internal_api; pub mod inventory; +pub mod multicast; pub mod quiesce; pub mod silo; diff --git a/nexus/types/src/multicast.rs b/nexus/types/src/multicast.rs new file mode 100644 index 00000000000..30d28499802 --- /dev/null +++ b/nexus/types/src/multicast.rs @@ -0,0 +1,34 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Internal multicast types used by Nexus. + +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::vlan::VlanID; +use std::net::IpAddr; + +/// Internal parameters for creating a multicast group. +/// +/// Groups are created implicitly when the first member joins. This struct +/// is used internally by Nexus to pass creation parameters to the datastore. +#[derive(Clone, Debug)] +pub struct MulticastGroupCreate { + pub identity: IdentityMetadataCreateParams, + /// The multicast IP address to allocate. + /// + /// If `None`, one will be allocated from the default pool. + pub multicast_ip: Option, + /// Source IP addresses for Source-Specific Multicast (SSM). + /// + /// None uses default behavior (Any-Source Multicast). + /// Empty list explicitly allows any source (Any-Source Multicast). + /// Non-empty list restricts to specific sources (SSM). + pub source_ips: Option>, + /// Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. + /// Tags packets leaving the rack to traverse VLAN-segmented upstream networks. + /// + /// Valid range: 2-4094 (VLAN IDs 0-1 are reserved by IEEE 802.1Q standard). 
+ // TODO(multicast): Remove mvlan field - being deprecated from multicast groups + pub mvlan: Option, +} diff --git a/openapi/nexus/nexus-2025120500.0.0-c28237.json b/openapi/nexus/nexus-2025120500.0.0-c28237.json new file mode 100644 index 00000000000..78dc21a3d41 --- /dev/null +++ b/openapi/nexus/nexus-2025120500.0.0-c28237.json @@ -0,0 +1,29048 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Oxide Region API", + "description": "API for interacting with the Oxide control plane", + "contact": { + "url": "https://oxide.computer", + "email": "api@oxide.computer" + }, + "version": "2025120500.0.0" + }, + "paths": { + "/device/auth": { + "post": { + "tags": [ + "console-auth" + ], + "summary": "Start an OAuth 2.0 Device Authorization Grant", + "description": "This endpoint is designed to be accessed from an *unauthenticated* API client. It generates and records a `device_code` and `user_code` which must be verified and confirmed prior to a token being granted.", + "operationId": "device_auth_request", + "requestBody": { + "content": { + "application/x-www-form-urlencoded": { + "schema": { + "$ref": "#/components/schemas/DeviceAuthRequest" + } + } + }, + "required": true + }, + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/device/confirm": { + "post": { + "tags": [ + "console-auth" + ], + "summary": "Confirm an OAuth 2.0 Device Authorization Grant", + "description": "This endpoint is designed to be accessed by the user agent (browser), not the client requesting the token. So we do not actually return the token here; it will be returned in response to the poll on `/device/token`.\n\nSome special logic applies when authenticating this request with an existing device token instead of a console session: the requested TTL must not produce an expiration time later than the authenticating token's expiration. 
If no TTL was specified in the initial grant request, the expiration will be the lesser of the silo max and the authenticating token's expiration time. To get the longest allowed lifetime, omit the TTL and authenticate with a web console session.", + "operationId": "device_auth_confirm", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeviceAuthVerify" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/device/token": { + "post": { + "tags": [ + "console-auth" + ], + "summary": "Request a device access token", + "description": "This endpoint should be polled by the client until the user code is verified and the grant is confirmed.", + "operationId": "device_access_token", + "requestBody": { + "content": { + "application/x-www-form-urlencoded": { + "schema": { + "$ref": "#/components/schemas/DeviceAccessTokenRequest" + } + } + }, + "required": true + }, + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/experimental/v1/probes": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List instrumentation probes", + "operationId": "probe_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + 
"schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeInfoResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "experimental" + ], + "summary": "Create instrumentation probe", + "operationId": "probe_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Probe" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/probes/{probe}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "View instrumentation probe", + "operationId": "probe_view", + "parameters": [ + { + "in": "path", + "name": "probe", + "description": "Name or ID of the probe", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProbeInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + 
"5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "experimental" + ], + "summary": "Delete instrumentation probe", + "operationId": "probe_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "probe", + "description": "Name or ID of the probe", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List all support bundles", + "operationId": "support_bundle_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/TimeAndIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfoResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "experimental" + ], + "summary": "Create a new support bundle", + "operationId": "support_bundle_create", + "requestBody": { + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "View a support bundle", + "operationId": "support_bundle_view", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "experimental" + ], + "summary": "Update a support bundle", + "operationId": "support_bundle_update", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "experimental" + ], + "summary": 
"Delete an existing support bundle", + "description": "May also be used to cancel a support bundle which is currently being collected, or to remove metadata for a support bundle that has failed.", + "operationId": "support_bundle_delete", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}/download": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Download the contents of a support bundle", + "operationId": "support_bundle_download", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "tags": [ + "experimental" + ], + "summary": "Download the metadata of a support bundle", + "operationId": "support_bundle_head", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + 
"/experimental/v1/system/support-bundles/{bundle_id}/download/{file}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Download a file within a support bundle", + "operationId": "support_bundle_download_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "file", + "description": "The file within the bundle to download", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "tags": [ + "experimental" + ], + "summary": "Download the metadata of a file within the support bundle", + "operationId": "support_bundle_head_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "file", + "description": "The file within the bundle to download", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}/index": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Download the index of a support bundle", + "operationId": "support_bundle_index", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion 
of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/login/{silo_name}/saml/{provider_name}": { + "post": { + "tags": [ + "login" + ], + "summary": "Authenticate a user via SAML", + "operationId": "login_saml", + "parameters": [ + { + "in": "path", + "name": "provider_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "path", + "name": "silo_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "303": { + "description": "redirect (see other)", + "headers": { + "location": { + "description": "HTTP \"Location\" header", + "style": "simple", + "required": true, + "schema": { + "type": "string" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/affinity-groups": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List affinity groups", + "operationId": "affinity_group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + 
"schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "experimental" + ], + "summary": "Create affinity group", + "operationId": "affinity_group_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/affinity-groups/{affinity_group}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Fetch affinity group", + "operationId": "affinity_group_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "description": "Name or ID of the affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/AffinityGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "experimental" + ], + "summary": "Update affinity group", + "operationId": "affinity_group_update", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "description": "Name or ID of the affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "experimental" + ], + "summary": "Delete affinity group", + "operationId": "affinity_group_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "description": "Name or ID of the affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/affinity-groups/{affinity_group}/members": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List affinity group members", + "operationId": 
"affinity_group_member_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "path", + "name": "affinity_group", + "description": "Name or ID of the affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupMemberResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/affinity-groups/{affinity_group}/members/instance/{instance}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Fetch affinity group member", + "operationId": "affinity_group_member_instance_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful 
operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "experimental" + ], + "summary": "Add member to affinity group", + "operationId": "affinity_group_member_instance_add", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "experimental" + ], + "summary": "Remove member from affinity group", + "operationId": "affinity_group_member_instance_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/alert-classes": { + "get": { + "tags": [ + 
"system/alerts" + ], + "summary": "List alert classes", + "operationId": "alert_class_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "filter", + "description": "An optional glob pattern for filtering alert class names.\n\nIf provided, only alert classes which match this glob pattern will be included in the response.", + "schema": { + "$ref": "#/components/schemas/AlertSubscription" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertClassResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/alert-receivers": { + "get": { + "tags": [ + "system/alerts" + ], + "summary": "List alert receivers", + "operationId": "alert_receiver_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + 
"$ref": "#/components/schemas/AlertReceiverResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/alert-receivers/{receiver}": { + "get": { + "tags": [ + "system/alerts" + ], + "summary": "Fetch alert receiver", + "operationId": "alert_receiver_view", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertReceiver" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/alerts" + ], + "summary": "Delete alert receiver", + "operationId": "alert_receiver_delete", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/alert-receivers/{receiver}/deliveries": { + "get": { + "tags": [ + "system/alerts" + ], + "summary": "List delivery attempts to alert receiver", + "description": "Optional query parameters to this endpoint may be used to filter deliveries by state. If none of the `failed`, `pending` or `delivered` query parameters are present, all deliveries are returned. 
If one or more of these parameters are provided, only those which are set to \"true\" are included in the response.", + "operationId": "alert_delivery_list", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "delivered", + "description": "If true, include deliveries which have succeeded.\n\nIf any of the \"pending\", \"failed\", or \"delivered\" query parameters are set to true, only deliveries matching those state(s) will be included in the response. If NO state filter parameters are set, then all deliveries are included.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "failed", + "description": "If true, include deliveries which have failed permanently.\n\nIf any of the \"pending\", \"failed\", or \"delivered\" query parameters are set to true, only deliveries matching those state(s) will be included in the response. If NO state filter parameters are set, then all deliveries are included.\n\nA delivery fails permanently when the retry limit of three total attempts is reached without a successful delivery.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "pending", + "description": "If true, include deliveries which are currently in progress.\n\nIf any of the \"pending\", \"failed\", or \"delivered\" query parameters are set to true, only deliveries matching those state(s) will be included in the response. 
If NO state filter parameters are set, then all deliveries are included.\n\nA delivery is considered \"pending\" if it has not yet been sent at all, or if a delivery attempt has failed but the delivery has retries remaining.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/TimeAndIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertDeliveryResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/alert-receivers/{receiver}/probe": { + "post": { + "tags": [ + "system/alerts" + ], + "summary": "Send liveness probe to alert receiver", + "description": "This endpoint synchronously sends a liveness probe to the selected alert receiver. The response message describes the outcome of the probe: either the successful response (as appropriate), or indication of why the probe failed.\n\nThe result of the probe is represented as an `AlertDelivery` model. Details relating to the status of the probe depend on the alert delivery mechanism, and are included in the `AlertDeliveryAttempts` model. 
For example, webhook receiver liveness probes include the HTTP status code returned by the receiver endpoint.\n\nNote that the response status is `200 OK` as long as a probe request was able to be sent to the receiver endpoint. If an HTTP-based receiver, such as a webhook, responds to the another status code, including an error, this will be indicated by the response body, *not* the status of the response.\n\nThe `resend` query parameter can be used to request re-delivery of failed events if the liveness probe succeeds. If it is set to true and the liveness probe succeeds, any alerts for which delivery to this receiver has failed will be queued for re-delivery.", + "operationId": "alert_receiver_probe", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "resend", + "description": "If true, resend all events that have not been delivered successfully if the probe request succeeds.", + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertProbeResult" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/alert-receivers/{receiver}/subscriptions": { + "post": { + "tags": [ + "system/alerts" + ], + "summary": "Add alert receiver subscription", + "operationId": "alert_receiver_subscription_add", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertSubscriptionCreate" + } + } + }, + 
"required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertSubscriptionCreated" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/alert-receivers/{receiver}/subscriptions/{subscription}": { + "delete": { + "tags": [ + "system/alerts" + ], + "summary": "Remove alert receiver subscription", + "operationId": "alert_receiver_subscription_remove", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "subscription", + "description": "The event class subscription itself.", + "required": true, + "schema": { + "$ref": "#/components/schemas/AlertSubscription" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/alerts/{alert_id}/resend": { + "post": { + "tags": [ + "system/alerts" + ], + "summary": "Request re-delivery of alert", + "operationId": "alert_delivery_resend", + "parameters": [ + { + "in": "path", + "name": "alert_id", + "description": "UUID of the alert", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AlertDeliveryId" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": 
"#/components/responses/Error" + } + } + } + }, + "/v1/anti-affinity-groups": { + "get": { + "tags": [ + "affinity" + ], + "summary": "List anti-affinity groups", + "operationId": "anti_affinity_group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "affinity" + ], + "summary": "Create anti-affinity group", + "operationId": "anti_affinity_group_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroup" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/anti-affinity-groups/{anti_affinity_group}": { + "get": { + "tags": [ + "affinity" + ], + "summary": "Fetch anti-affinity group", + "operationId": "anti_affinity_group_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "description": "Name or ID of the anti affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "affinity" + ], + "summary": "Update anti-affinity group", + "operationId": "anti_affinity_group_update", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "description": "Name or ID of the anti affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "affinity" + ], + 
"summary": "Delete anti-affinity group", + "operationId": "anti_affinity_group_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "description": "Name or ID of the anti affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/anti-affinity-groups/{anti_affinity_group}/members": { + "get": { + "tags": [ + "affinity" + ], + "summary": "List anti-affinity group members", + "operationId": "anti_affinity_group_member_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "description": "Name or ID of the anti affinity group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupMemberResultsPage" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/anti-affinity-groups/{anti_affinity_group}/members/instance/{instance}": { + "get": { + "tags": [ + "affinity" + ], + "summary": "Fetch anti-affinity group member", + "operationId": "anti_affinity_group_member_instance_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "affinity" + ], + "summary": "Add member to anti-affinity group", + "operationId": "anti_affinity_group_member_instance_add", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": 
{ + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "affinity" + ], + "summary": "Remove member from anti-affinity group", + "operationId": "anti_affinity_group_member_instance_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "anti_affinity_group", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/auth-settings": { + "get": { + "tags": [ + "silos" + ], + "summary": "Fetch current silo's auth settings", + "operationId": "auth_settings_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloAuthSettings" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "silos" + ], + "summary": "Update current silo's auth settings", + "operationId": "auth_settings_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloAuthSettingsUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloAuthSettings" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/certificates": { + "get": { + "tags": [ + "silos" + ], + 
"summary": "List certificates for external endpoints", + "description": "Returns a list of TLS certificates used for the external API (for the current Silo). These are sorted by creation date, with the most recent certificates appearing first.", + "operationId": "certificate_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertificateResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "silos" + ], + "summary": "Create new system-wide x.509 certificate", + "description": "This certificate is automatically used by the Oxide Control plane to serve external connections.", + "operationId": "certificate_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CertificateCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Certificate" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/certificates/{certificate}": { + "get": { + "tags": [ + "silos" + ], + "summary": 
"Fetch certificate", + "description": "Returns the details of a specific certificate", + "operationId": "certificate_view", + "parameters": [ + { + "in": "path", + "name": "certificate", + "description": "Name or ID of the certificate", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Certificate" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "silos" + ], + "summary": "Delete certificate", + "description": "Permanently delete a certificate. This operation cannot be undone.", + "operationId": "certificate_delete", + "parameters": [ + { + "in": "path", + "name": "certificate", + "description": "Name or ID of the certificate", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks": { + "get": { + "tags": [ + "disks" + ], + "summary": "List disks", + "operationId": "disk_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": 
"#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "disks" + ], + "summary": "Create a disk", + "operationId": "disk_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Disk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks/{disk}": { + "get": { + "tags": [ + "disks" + ], + "summary": "Fetch disk", + "operationId": "disk_view", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Disk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "disks" + ], + "summary": 
"Delete disk", + "operationId": "disk_delete", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks/{disk}/bulk-write": { + "post": { + "tags": [ + "disks" + ], + "summary": "Import blocks into disk", + "operationId": "disk_bulk_write_import", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImportBlocksBulkWrite" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks/{disk}/bulk-write-start": { + "post": { + "tags": [ + "disks" + ], + "summary": "Start importing blocks into disk", + "description": "Start the process of importing blocks into a disk", + "operationId": "disk_bulk_write_import_start", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks/{disk}/bulk-write-stop": { + "post": { + "tags": [ + "disks" + ], + "summary": "Stop importing blocks into disk", + "description": "Stop the process of importing blocks into a disk", + "operationId": "disk_bulk_write_import_stop", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/disks/{disk}/finalize": { + "post": { + "tags": [ + "disks" + ], + "summary": "Confirm disk block import completion", + "operationId": "disk_finalize_import", + "parameters": [ + { + "in": "path", + "name": "disk", + "description": "Name or ID of the disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinalizeDisk" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/floating-ips": { + "get": { + "tags": [ + "floating-ips" + ], + "summary": "List floating IPs", + "operationId": 
"floating_ip_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "floating-ips" + ], + "summary": "Create floating IP", + "operationId": "floating_ip_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/floating-ips/{floating_ip}": { + "get": { + "tags": [ + "floating-ips" + ], + "summary": "Fetch floating IP", + 
"operationId": "floating_ip_view", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "floating-ips" + ], + "summary": "Update floating IP", + "operationId": "floating_ip_update", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "floating-ips" + ], + "summary": "Delete floating IP", + "operationId": "floating_ip_delete", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + 
"description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/floating-ips/{floating_ip}/attach": { + "post": { + "tags": [ + "floating-ips" + ], + "summary": "Attach floating IP", + "description": "Attach floating IP to an instance or other resource.", + "operationId": "floating_ip_attach", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpAttach" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/floating-ips/{floating_ip}/detach": { + "post": { + "tags": [ + "floating-ips" + ], + "summary": "Detach floating IP", + "operationId": "floating_ip_detach", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/groups": { + "get": { + "tags": [ + "silos" + ], + "summary": "List groups", + "operationId": "group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/groups/{group_id}": { + "get": { + "tags": [ + "silos" + ], + "summary": "Fetch group", + "operationId": "group_view", + "parameters": [ + { + "in": "path", + "name": "group_id", + "description": "ID of the group", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Group" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/images": { + "get": { + "tags": [ + "images" + ], + "summary": "List images", + "description": "List images which are global or scoped to the 
specified project. The images are returned sorted by creation date, with the most recent images appearing first.", + "operationId": "image_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "images" + ], + "summary": "Create image", + "description": "Create a new image in a project.", + "operationId": "image_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + 
"/v1/images/{image}": { + "get": { + "tags": [ + "images" + ], + "summary": "Fetch image", + "description": "Fetch the details for a specific image in a project.", + "operationId": "image_view", + "parameters": [ + { + "in": "path", + "name": "image", + "description": "Name or ID of the image", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "images" + ], + "summary": "Delete image", + "description": "Permanently delete an image from a project. This operation cannot be undone. 
Any instances in the project using the image will continue to run; however, new instances cannot be created with this image.", +        "operationId": "image_delete", +        "parameters": [ +          { +            "in": "path", +            "name": "image", +            "description": "Name or ID of the image", +            "required": true, +            "schema": { +              "$ref": "#/components/schemas/NameOrId" +            } +          }, +          { +            "in": "query", +            "name": "project", +            "description": "Name or ID of the project", +            "schema": { +              "$ref": "#/components/schemas/NameOrId" +            } +          } +        ], +        "responses": { +          "204": { +            "description": "successful deletion" +          }, +          "4XX": { +            "$ref": "#/components/responses/Error" +          }, +          "5XX": { +            "$ref": "#/components/responses/Error" +          } +        } +      } +    }, +    "/v1/images/{image}/demote": { +      "post": { +        "tags": [ +          "images" +        ], +        "summary": "Demote silo image", +        "description": "Demote silo image to be visible only to a specified project", +        "operationId": "image_demote", +        "parameters": [ +          { +            "in": "path", +            "name": "image", +            "description": "Name or ID of the image", +            "required": true, +            "schema": { +              "$ref": "#/components/schemas/NameOrId" +            } +          }, +          { +            "in": "query", +            "name": "project", +            "description": "Name or ID of the project", +            "required": true, +            "schema": { +              "$ref": "#/components/schemas/NameOrId" +            } +          } +        ], +        "responses": { +          "202": { +            "description": "successfully enqueued operation", +            "content": { +              "application/json": { +                "schema": { +                  "$ref": "#/components/schemas/Image" +                } +              } +            } +          }, +          "4XX": { +            "$ref": "#/components/responses/Error" +          }, +          "5XX": { +            "$ref": "#/components/responses/Error" +          } +        } +      } +    }, +    "/v1/images/{image}/promote": { +      "post": { +        "tags": [ +          "images" +        ], +        "summary": "Promote project image", +        "description": "Promote project image to be visible to all projects in the silo", +        "operationId": "image_promote", +        "parameters": [ +          { +            "in": "path", +            "name": "image", +            "description": "Name or ID of the image", +            "required": true, +            "schema": { +              "$ref": 
"#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances": { + "get": { + "tags": [ + "instances" + ], + "summary": "List instances", + "operationId": "instance_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "instances" + ], + "summary": "Create instance", + "operationId": "instance_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}": { + "get": { + "tags": [ + "instances" + ], + "summary": "Fetch instance", + "operationId": "instance_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "instances" + ], + "summary": "Update instance", + "operationId": "instance_update", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { 
+ "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "instances" + ], + "summary": "Delete instance", + "operationId": "instance_delete", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/affinity-groups": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List affinity groups containing instance", + "operationId": "instance_affinity_group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": 
"successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AffinityGroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/instances/{instance}/anti-affinity-groups": { + "get": { + "tags": [ + "instances" + ], + "summary": "List anti-affinity groups containing instance", + "operationId": "instance_anti_affinity_group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AntiAffinityGroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/instances/{instance}/disks": { + "get": { + "tags": [ + "instances" + ], + "summary": "List disks for instance", + "operationId": "instance_disk_list", + "parameters": [ + { + "in": "query", + "name": 
"limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/instances/{instance}/disks/attach": { + "post": { + "tags": [ + "instances" + ], + "summary": "Attach disk to instance", + "operationId": "instance_disk_attach", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskPath" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/Disk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/disks/detach": { + "post": { + "tags": [ + "instances" + ], + "summary": "Detach disk from instance", + "operationId": "instance_disk_detach", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskPath" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Disk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/external-ips": { + "get": { + "tags": [ + "instances" + ], + "summary": "List external IP addresses", + "operationId": "instance_external_ip_list", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ExternalIpResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + 
}, + "/v1/instances/{instance}/external-ips/ephemeral": { + "post": { + "tags": [ + "instances" + ], + "summary": "Allocate and attach ephemeral IP to instance", + "operationId": "instance_ephemeral_ip_attach", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EphemeralIpCreate" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ExternalIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "instances" + ], + "summary": "Detach and deallocate ephemeral IP from instance", + "operationId": "instance_ephemeral_ip_detach", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/multicast-groups": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List multicast groups for an instance.", + "operationId": "instance_multicast_group_list", + "parameters": [ + { + "in": "query", + "name": 
"project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupMemberResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/multicast-groups/{multicast_group}": { + "put": { + "tags": [ + "experimental" + ], + "summary": "Join a multicast group.", + "description": "This is functionally equivalent to adding the instance via the group's member management endpoint or updating the instance's `multicast_groups` field. All approaches modify the same membership and trigger reconciliation.\n\nAuthorization: requires Modify on the instance identified in the URL path (checked first) and Read on the multicast group. Checking instance permission first prevents creating orphaned groups when the instance check fails.\n\nGroup Identification: Groups can be referenced by name, IP address, or UUID. All three are fleet-wide unique identifiers: - By name: If group doesn't exist, it's implicitly created with an auto-allocated IP from a multicast pool linked to the caller's silo. Pool selection prefers the default pool; if none, selects alphabetically. - By IP: If group doesn't exist, it's implicitly created using that IP. The pool is determined by which pool contains the IP. - By UUID: Group must already exist.\n\nSource IP filtering (SSM): - Duplicate IPs in the request are automatically deduplicated. - Maximum of 64 source IPs allowed (per RFC 3376, IGMPv3). - Creating a new SSM group: `source_ips` is required. 
SSM addresses (232.x.x.x for IPv4, ff3x:: for IPv6) require source filtering. - Joining an existing group: If `source_ips` is omitted, the instance inherits the group's existing sources. If specified, they must exactly match the group's sources or the request fails. - Providing `source_ips` to an ASM group (or vice versa) will fail.", + "operationId": "instance_multicast_group_join", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceMulticastGroupJoin" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "experimental" + ], + "summary": "Leave a multicast group.", + "description": "The group can be specified by name, UUID, or multicast IP address. All three are fleet-wide unique identifiers.\n\nThis is functionally equivalent to removing the instance via the group's member management endpoint or updating the instance's `multicast_groups` field. 
All approaches modify the same membership and trigger reconciliation.\n\nAuthorization: requires Modify on the instance (checked first) and Read on the multicast group.", + "operationId": "instance_multicast_group_leave", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/reboot": { + "post": { + "tags": [ + "instances" + ], + "summary": "Reboot an instance", + "operationId": "instance_reboot", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/serial-console": { + "get": { + "tags": [ + "instances" + ], + "summary": "Fetch instance serial console", + "operationId": "instance_serial_console", + 
"parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "from_start", + "description": "Character index in the serial buffer from which to read, counting the bytes output since instance start. If this is not provided, `most_recent` must be provided, and if this *is* provided, `most_recent` must *not* be provided.", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + { + "in": "query", + "name": "max_bytes", + "description": "Maximum number of bytes of buffered serial console contents to return. If the requested range runs to the end of the available buffer, the data returned will be shorter than `max_bytes`.", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + { + "in": "query", + "name": "most_recent", + "description": "Character index in the serial buffer from which to read, counting *backward* from the most recently buffered data retrieved from the instance. 
(See note on `from_start` about mutual exclusivity)", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceSerialConsoleData" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/serial-console/stream": { + "get": { + "tags": [ + "instances" + ], + "summary": "Stream instance serial console", + "operationId": "instance_serial_console_stream", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "most_recent", + "description": "Character index in the serial buffer from which to read, counting *backward* from the most recently buffered data retrieved from the instance.", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + }, + "x-dropshot-websocket": {} + } + }, + "/v1/instances/{instance}/ssh-public-keys": { + "get": { + "tags": [ + "instances" + ], + "summary": "List SSH public keys for instance", + "description": "List SSH public keys injected via cloud-init during instance creation. 
Note that this list is a snapshot in time and will not reflect updates made after the instance is created.", + "operationId": "instance_ssh_public_key_list", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SshKeyResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/instances/{instance}/start": { + "post": { + "tags": [ + "instances" + ], + "summary": "Boot instance", + "operationId": "instance_start", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/instances/{instance}/stop": { + "post": { + "tags": [ + "instances" + ], + "summary": "Stop instance", + "operationId": "instance_stop", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/internet-gateway-ip-addresses": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List IP addresses attached to internet gateway", + "operationId": "internet_gateway_ip_address_list", + "parameters": [ + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" 
+ } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpAddressResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "gateway" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Attach IP address to internet gateway", + "operationId": "internet_gateway_ip_address_create", + "parameters": [ + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpAddressCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpAddress" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + 
"/v1/internet-gateway-ip-addresses/{address}": { + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Detach IP address from internet gateway", + "operationId": "internet_gateway_ip_address_delete", + "parameters": [ + { + "in": "path", + "name": "address", + "description": "Name or ID of the IP address", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "cascade", + "description": "Also delete routes targeting this gateway element.", + "schema": { + "type": "boolean" + } + }, + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/internet-gateway-ip-pools": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List IP pools attached to internet gateway", + "operationId": "internet_gateway_ip_pool_list", + "parameters": [ + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the 
subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpPoolResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "gateway" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Attach IP pool to internet gateway", + "operationId": "internet_gateway_ip_pool_create", + "parameters": [ + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpPoolCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayIpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/internet-gateway-ip-pools/{pool}": { + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Detach IP pool from internet gateway", + "operationId": "internet_gateway_ip_pool_delete", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "cascade", + "description": "Also delete routes targeting this gateway element.", + "schema": { + "type": "boolean" + } + }, + { + "in": "query", + "name": "gateway", + "description": "Name or ID of the internet gateway", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `gateway` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/internet-gateways": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List internet gateways", + "operationId": "internet_gateway_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call 
to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "vpc" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create VPC internet gateway", + "operationId": "internet_gateway_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGatewayCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGateway" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/internet-gateways/{gateway}": { + "get": { + "tags": [ + 
"vpcs" + ], + "summary": "Fetch internet gateway", + "operationId": "internet_gateway_view", + "parameters": [ + { + "in": "path", + "name": "gateway", + "description": "Name or ID of the gateway", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InternetGateway" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete internet gateway", + "operationId": "internet_gateway_delete", + "parameters": [ + { + "in": "path", + "name": "gateway", + "description": "Name or ID of the gateway", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "cascade", + "description": "Also delete routes targeting this gateway.", + "schema": { + "type": "boolean" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/ip-pools": { + "get": { + "tags": [ + 
"projects" + ], + "summary": "List IP pools", + "operationId": "project_ip_pool_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloIpPoolResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/ip-pools/{pool}": { + "get": { + "tags": [ + "projects" + ], + "summary": "Fetch IP pool", + "operationId": "project_ip_pool_view", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloIpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/login/{silo_name}/local": { + "post": { + "tags": [ + "login" + ], + "summary": "Authenticate a user via username and password", + "operationId": "login_local", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "requestBody": { + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/UsernamePasswordCredentials" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/logout": { + "post": { + "tags": [ + "console-auth" + ], + "summary": "Log user out of web console by deleting session on client and server", + "operationId": "logout", + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/me": { + "get": { + "tags": [ + "current-user" + ], + "summary": "Fetch user for current session", + "operationId": "current_user_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CurrentUser" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/me/access-tokens": { + "get": { + "tags": [ + "tokens" + ], + "summary": "List access tokens", + "description": "List device access tokens for the currently authenticated user.", + "operationId": "current_user_access_token_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeviceAccessTokenResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/me/access-tokens/{token_id}": { + "delete": { + "tags": [ + "tokens" + ], + "summary": "Delete access token", + "description": "Delete a device access token for the currently authenticated user.", + "operationId": "current_user_access_token_delete", + "parameters": [ + { + "in": "path", + "name": "token_id", + "description": "ID of the token", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/me/groups": { + "get": { + "tags": [ + "current-user" + ], + "summary": "Fetch current user's groups", + "operationId": "current_user_groups", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + 
"/v1/me/ssh-keys": { + "get": { + "tags": [ + "current-user" + ], + "summary": "List SSH public keys", + "description": "Lists SSH public keys for the currently authenticated user.", + "operationId": "current_user_ssh_key_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SshKeyResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "current-user" + ], + "summary": "Create SSH public key", + "description": "Create an SSH public key for the currently authenticated user.", + "operationId": "current_user_ssh_key_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SshKeyCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SshKey" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/me/ssh-keys/{ssh_key}": { + "get": { + "tags": [ + "current-user" + ], + "summary": "Fetch SSH public key", + "description": "Fetch SSH public key associated with the currently 
authenticated user.", + "operationId": "current_user_ssh_key_view", + "parameters": [ + { + "in": "path", + "name": "ssh_key", + "description": "Name or ID of the SSH key", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SshKey" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "current-user" + ], + "summary": "Delete SSH public key", + "description": "Delete an SSH public key associated with the currently authenticated user.", + "operationId": "current_user_ssh_key_delete", + "parameters": [ + { + "in": "path", + "name": "ssh_key", + "description": "Name or ID of the SSH key", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/metrics/{metric_name}": { + "get": { + "tags": [ + "metrics" + ], + "summary": "View metrics", + "description": "View CPU, memory, or storage utilization metrics at the silo or project level.", + "operationId": "silo_metric", + "parameters": [ + { + "in": "path", + "name": "metric_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/SystemMetricName" + } + }, + { + "in": "query", + "name": "end_time", + "description": "An exclusive end time of metrics.", + "schema": { + "type": "string", + "format": "date-time" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "order", + 
"description": "Query result order", + "schema": { + "$ref": "#/components/schemas/PaginationOrder" + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "start_time", + "description": "An inclusive start time of metrics.", + "schema": { + "type": "string", + "format": "date-time" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeasurementResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "end_time", + "start_time" + ] + } + } + }, + "/v1/multicast-groups": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List multicast groups.", + "operationId": "multicast_group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": 
"#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/multicast-groups/{multicast_group}": { + "get": { + "tags": [ + "experimental" + ], + "summary": "Fetch a multicast group.", + "description": "The group can be specified by name, UUID, or multicast IP address. (e.g., \"224.1.2.3\" or \"ff38::1\").", + "operationId": "multicast_group_view", + "parameters": [ + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroup" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/multicast-groups/{multicast_group}/members": { + "get": { + "tags": [ + "experimental" + ], + "summary": "List members of a multicast group.", + "description": "The group can be specified by name, UUID, or multicast IP address.", + "operationId": "multicast_group_member_list", + "parameters": [ + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": 
{ + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupMemberResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "experimental" + ], + "summary": "Add instance to a multicast group.", + "description": "Functionally equivalent to updating the instance's `multicast_groups` field. Both approaches modify the same underlying membership and trigger the same reconciliation logic.\n\nAuthorization: requires Modify on the instance specified in the request body (checked first) and Read on the multicast group (users can only attach instances they are authorized to modify).\n\nGroup Identification: Groups can be referenced by name, IP address, or UUID. All three are fleet-wide unique identifiers: - By name: If group doesn't exist, it's implicitly created with an auto-allocated IP from a multicast pool linked to the caller's silo. Pool selection prefers the default pool; if none, selects alphabetically. - By IP: If group doesn't exist, it's implicitly created using that IP. The pool is determined by which pool contains the IP. - By UUID: Group must already exist.\n\nSource IP filtering (SSM): - Duplicate IPs in the request are automatically deduplicated. - Maximum of 64 source IPs allowed (per RFC 3376, IGMPv3). - Creating a new SSM group: `source_ips` is required. SSM addresses (232.x.x.x for IPv4, FF3x:: for IPv6) require source filtering. - Joining an existing group: If `source_ips` is omitted, the instance inherits the group's existing sources. If specified, they must exactly match the group's sources or the request fails. 
- Providing `source_ips` to an ASM group (or vice versa) will fail.\n\nSpecify instance by name (requires `?project=`) or UUID.", + "operationId": "multicast_group_member_add", + "parameters": [ + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupMemberAdd" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupMember" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/multicast-groups/{multicast_group}/members/{instance}": { + "delete": { + "tags": [ + "experimental" + ], + "summary": "Remove instance from a multicast group.", + "description": "The group can be specified by name, UUID, or multicast IP address. All three are fleet-wide unique identifiers.\n\nFunctionally equivalent to removing the group from the instance's `multicast_groups` field. 
Both approaches modify the same underlying membership and trigger reconciliation.\n\nAuthorization: requires Modify on the instance (checked first) and Read on the multicast group.\n\nSpecify instance by name (requires `?project=`) or UUID.", + "operationId": "multicast_group_member_remove", + "parameters": [ + { + "in": "path", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "multicast_group", + "description": "Name, ID, or IP address of the multicast group", + "required": true, + "schema": { + "$ref": "#/components/schemas/MulticastGroupIdentifier" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/network-interfaces": { + "get": { + "tags": [ + "instances" + ], + "summary": "List network interfaces", + "operationId": "instance_network_interface_list", + "parameters": [ + { + "in": "query", + "name": "instance", + "description": "Name or ID of the instance", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + 
}, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "instance" + ] + } + }, + "post": { + "tags": [ + "instances" + ], + "summary": "Create network interface", + "operationId": "instance_network_interface_create", + "parameters": [ + { + "in": "query", + "name": "instance", + "description": "Name or ID of the instance", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterface" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/network-interfaces/{interface}": { + "get": { + "tags": [ + "instances" + ], + "summary": "Fetch network interface", + "operationId": "instance_network_interface_view", + "parameters": [ + { + "in": "path", + "name": "interface", + "description": "Name or ID of the network interface", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "instance", + "description": "Name 
or ID of the instance", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterface" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "instances" + ], + "summary": "Update network interface", + "operationId": "instance_network_interface_update", + "parameters": [ + { + "in": "path", + "name": "interface", + "description": "Name or ID of the network interface", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "instance", + "description": "Name or ID of the instance", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterface" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "instances" + ], + "summary": "Delete network interface", + "description": "Note that the primary interface for an instance cannot be deleted if there are 
any secondary interfaces. A new primary interface must be designated first. The primary interface can be deleted if there are no secondary interfaces.", + "operationId": "instance_network_interface_delete", + "parameters": [ + { + "in": "path", + "name": "interface", + "description": "Name or ID of the network interface", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "instance", + "description": "Name or ID of the instance", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `instance` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/ping": { + "get": { + "tags": [ + "system/status" + ], + "summary": "Ping API", + "description": "Always responds with Ok if it responds at all.", + "operationId": "ping", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ping" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/policy": { + "get": { + "tags": [ + "silos" + ], + "summary": "Fetch current silo's IAM policy", + "operationId": "policy_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "silos" + ], + "summary": "Update current silo's IAM policy", + 
"operationId": "policy_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/projects": { + "get": { + "tags": [ + "projects" + ], + "summary": "List projects", + "operationId": "project_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "projects" + ], + "summary": "Create project", + "operationId": "project_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Project" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/projects/{project}": { + "get": { + "tags": [ + "projects" + ], + "summary": "Fetch project", + "operationId": "project_view", + "parameters": [ + { + "in": "path", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Project" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "projects" + ], + "summary": "Update a project", + "operationId": "project_update", + "parameters": [ + { + "in": "path", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Project" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "projects" + ], + "summary": "Delete project", + "operationId": "project_delete", + "parameters": [ + { + "in": "path", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + 
"/v1/projects/{project}/policy": { + "get": { + "tags": [ + "projects" + ], + "summary": "Fetch project's IAM policy", + "operationId": "project_policy_view", + "parameters": [ + { + "in": "path", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "projects" + ], + "summary": "Update project's IAM policy", + "operationId": "project_policy_update", + "parameters": [ + { + "in": "path", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectRolePolicy" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProjectRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/snapshots": { + "get": { + "tags": [ + "snapshots" + ], + "summary": "List snapshots", + "operationId": "snapshot_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": 
"string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SnapshotResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "snapshots" + ], + "summary": "Create snapshot", + "description": "Creates a point-in-time snapshot from a disk.", + "operationId": "snapshot_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SnapshotCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Snapshot" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/snapshots/{snapshot}": { + "get": { + "tags": [ + "snapshots" + ], + "summary": "Fetch snapshot", + "operationId": "snapshot_view", + "parameters": [ + { + "in": "path", + "name": "snapshot", + "description": "Name or ID of the snapshot", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + 
"responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Snapshot" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "snapshots" + ], + "summary": "Delete snapshot", + "operationId": "snapshot_delete", + "parameters": [ + { + "in": "path", + "name": "snapshot", + "description": "Name or ID of the snapshot", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/audit-log": { + "get": { + "tags": [ + "system/audit-log" + ], + "summary": "View audit log", + "description": "A single item in the audit log represents both the beginning and end of the logged operation (represented by `time_started` and `time_completed`) so that clients do not have to find multiple entries and match them up by request ID to get the full picture of an operation. Because timestamps may not be unique, entries have also have a unique `id` that can be used to deduplicate items fetched from overlapping time intervals.\n\nAudit log entries are designed to be immutable: once you see an entry, fetching it again will never get you a different result. The list is ordered by `time_completed`, not `time_started`. 
If you fetch the audit log for a time range that is fully in the past, the resulting list is guaranteed to be complete, i.e., fetching the same timespan again later will always produce the same set of entries.", + "operationId": "audit_log_list", + "parameters": [ + { + "in": "query", + "name": "end_time", + "description": "Exclusive", + "schema": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/TimeAndIdSortMode" + } + }, + { + "in": "query", + "name": "start_time", + "description": "Required, inclusive", + "schema": { + "type": "string", + "format": "date-time" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuditLogEntryResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "start_time" + ] + } + } + }, + "/v1/system/hardware/disks": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List physical disks", + "operationId": "physical_disk_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent 
page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDiskResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/disks/{disk_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Get a physical disk", + "operationId": "physical_disk_view", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "description": "ID of the physical disk", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDisk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/rack-switch-port/{rack_id}/{switch_location}/{port}/lldp/neighbors": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Fetch the LLDP neighbors seen on a switch port", + "operationId": "networking_switch_port_lldp_neighbors", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "path", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + 
"required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LldpNeighborResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/racks": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List racks", + "operationId": "rack_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RackResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + 
}, + "/v1/system/hardware/racks/{rack_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Fetch rack", + "operationId": "rack_view", + "parameters": [ + { + "in": "path", + "name": "rack_id", + "description": "ID of the rack", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Rack" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/sleds": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List sleds", + "operationId": "sled_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/hardware" + ], + "summary": "Add sled to initialized rack", + "operationId": "sled_add", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledId" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": 
"successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledId" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/sleds/{sled_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Fetch sled", + "operationId": "sled_view", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sled" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/sleds/{sled_id}/disks": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List physical disks attached to sleds", + "operationId": "sled_physical_disk_list", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDiskResultsPage" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/sleds/{sled_id}/instances": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List instances running on given sled", + "operationId": "sled_instance_list", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledInstanceResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/sleds/{sled_id}/provision-policy": { + "put": { + "tags": [ + "system/hardware" + ], + "summary": "Set sled provision policy", + "operationId": "sled_set_provision_policy", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionPolicyParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + 
"description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionPolicyResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/sleds-uninitialized": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List uninitialized sleds", + "operationId": "sled_list_uninitialized", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/switch-port": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List switch ports", + "operationId": "networking_switch_port_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + }, + { + "in": "query", + 
"name": "switch_port_id", + "description": "An optional switch port id to use when listing switch ports.", + "schema": { + "nullable": true, + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/switch-port/{port}/lldp/config": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Fetch the LLDP configuration for a switch port", + "operationId": "networking_switch_port_lldp_config_view", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LldpLinkConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "system/networking" + ], + "summary": "Update the LLDP configuration for a switch port", + "operationId": "networking_switch_port_lldp_config_update", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", 
+ "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LldpLinkConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/switch-port/{port}/settings": { + "post": { + "tags": [ + "system/hardware" + ], + "summary": "Apply switch port settings", + "operationId": "networking_switch_port_apply_settings", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortApplySettings" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + 
"delete": { + "tags": [ + "system/hardware" + ], + "summary": "Clear switch port settings", + "operationId": "networking_switch_port_clear_settings", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/switch-port/{port}/status": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Get switch port status", + "operationId": "networking_switch_port_status", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchLinkState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": 
"#/components/responses/Error" + } + } + } + }, + "/v1/system/hardware/switches": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "List switches", + "operationId": "switch_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/hardware/switches/{switch_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Fetch switch", + "operationId": "switch_view", + "parameters": [ + { + "in": "path", + "name": "switch_id", + "description": "ID of the switch", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Switch" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/identity-providers": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List identity providers for silo", + "description": "List identity providers for silo by silo name or ID.", + "operationId": "silo_identity_provider_list", + 
"parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdentityProviderResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "silo" + ] + } + } + }, + "/v1/system/identity-providers/local/users": { + "post": { + "tags": [ + "system/silos" + ], + "summary": "Create user", + "description": "Users can only be created in Silos with `provision_type` == `Fixed`. 
Otherwise, Silo users are just-in-time (JIT) provisioned when a user first logs in using an external Identity Provider.", + "operationId": "local_idp_user_create", + "parameters": [ + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/identity-providers/local/users/{user_id}": { + "delete": { + "tags": [ + "system/silos" + ], + "summary": "Delete user", + "operationId": "local_idp_user_delete", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "The user's internal ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/identity-providers/local/users/{user_id}/set-password": { + "post": { + "tags": [ + "system/silos" + ], + "summary": "Set or invalidate user's password", + "description": "Passwords can only be updated for users in Silos with identity mode `LocalOnly`.", + "operationId": "local_idp_user_set_password", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "The user's internal ID", + "required": true, + "schema": { + 
"type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserPassword" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/identity-providers/saml": { + "post": { + "tags": [ + "system/silos" + ], + "summary": "Create SAML identity provider", + "operationId": "saml_identity_provider_create", + "parameters": [ + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamlIdentityProviderCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamlIdentityProvider" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/identity-providers/saml/{provider}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch SAML identity provider", + "operationId": "saml_identity_provider_view", + "parameters": [ + { + "in": "path", + "name": "provider", + "description": "Name or ID of the SAML identity provider", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + 
"200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamlIdentityProvider" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "List IP pools", + "operationId": "ip_pool_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Create IP pool", + "description": "IPv6 is not yet supported for unicast pools.", + "operationId": "ip_pool_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + 
"/v1/system/ip-pools/{pool}": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "Fetch IP pool", + "operationId": "ip_pool_view", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/ip-pools" + ], + "summary": "Update IP pool", + "operationId": "ip_pool_update", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/ip-pools" + ], + "summary": "Delete IP pool", + "operationId": "ip_pool_delete", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/ranges": { + "get": { + "tags": [ + "system/ip-pools" + ], + 
"summary": "List ranges for IP pool", + "description": "Ranges are ordered by their first address.", + "operationId": "ip_pool_range_list", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolRangeResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/ip-pools/{pool}/ranges/add": { + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Add range to IP pool.", + "description": "IPv6 ranges are not allowed yet for unicast pools.\n\nFor multicast pools, all ranges must be either Any-Source Multicast (ASM) or Source-Specific Multicast (SSM), but not both. 
Mixing ASM and SSM ranges in the same pool is not allowed.\n\nASM: IPv4 addresses outside 232.0.0.0/8, IPv6 addresses with flag field != 3\nSSM: IPv4 addresses in 232.0.0.0/8, IPv6 addresses with flag field = 3", + "operationId": "ip_pool_range_add", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpRange" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolRange" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/ranges/remove": { + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Remove range from IP pool", + "operationId": "ip_pool_range_remove", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpRange" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/silos": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "List IP pool's linked silos", + "operationId": "ip_pool_silo_list", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + 
"name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloLinkResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Link IP pool to silo", + "description": "Users in linked silos can allocate external IPs from this pool for their instances. A silo can have at most one default pool. 
IPs are allocated from the default pool when users ask for one without specifying a pool.", + "operationId": "ip_pool_silo_link", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolLinkSilo" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloLink" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/silos/{silo}": { + "put": { + "tags": [ + "system/ip-pools" + ], + "summary": "Make IP pool default for silo", + "description": "When a user asks for an IP (e.g., at instance create time) without specifying a pool, the IP comes from the default pool if a default is configured. 
When a pool is made the default for a silo, any existing default will remain linked to the silo, but will no longer be the default.", + "operationId": "ip_pool_silo_update", + "parameters": [ + { + "in": "path", + "name": "pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolSiloLink" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/ip-pools" + ], + "summary": "Unlink IP pool from silo", + "description": "Will fail if there are any outstanding IPs allocated in the silo.", + "operationId": "ip_pool_silo_unlink", + "parameters": [ + { + "in": "path", + "name": "pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "path", + "name": "silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools/{pool}/utilization": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "Fetch IP pool utilization", + "operationId": "ip_pool_utilization_view", + "parameters": [ + { + "in": "path", + "name": "pool", + "description": "Name or ID of the IP pool", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + 
"description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolUtilization" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools-service": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "Fetch Oxide service IP pool", + "operationId": "ip_pool_service_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPool" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools-service/ranges": { + "get": { + "tags": [ + "system/ip-pools" + ], + "summary": "List IP ranges for the Oxide service pool", + "description": "Ranges are ordered by their first address.", + "operationId": "ip_pool_service_range_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolRangeResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/ip-pools-service/ranges/add": { + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Add IP range to Oxide service pool", + "description": "IPv6 ranges are not 
allowed yet.", + "operationId": "ip_pool_service_range_add", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpRange" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpPoolRange" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/ip-pools-service/ranges/remove": { + "post": { + "tags": [ + "system/ip-pools" + ], + "summary": "Remove IP range from Oxide service pool", + "operationId": "ip_pool_service_range_remove", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpRange" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/metrics/{metric_name}": { + "get": { + "tags": [ + "system/metrics" + ], + "summary": "View metrics", + "description": "View CPU, memory, or storage utilization metrics at the fleet or silo level.", + "operationId": "system_metric", + "parameters": [ + { + "in": "path", + "name": "metric_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/SystemMetricName" + } + }, + { + "in": "query", + "name": "end_time", + "description": "An exclusive end time of metrics.", + "schema": { + "type": "string", + "format": "date-time" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "order", + "description": "Query result order", + "schema": { + "$ref": "#/components/schemas/PaginationOrder" + } + }, + 
{ + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "start_time", + "description": "An inclusive start time of metrics.", + "schema": { + "type": "string", + "format": "date-time" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeasurementResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "end_time", + "start_time" + ] + } + } + }, + "/v1/system/networking/address-lot": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List address lots", + "operationId": "networking_address_lot_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddressLotResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + 
"post": { + "tags": [ + "system/networking" + ], + "summary": "Create address lot", + "operationId": "networking_address_lot_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddressLotCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddressLotCreateResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/address-lot/{address_lot}": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Fetch address lot", + "operationId": "networking_address_lot_view", + "parameters": [ + { + "in": "path", + "name": "address_lot", + "description": "Name or ID of the address lot", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddressLotViewResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Delete address lot", + "operationId": "networking_address_lot_delete", + "parameters": [ + { + "in": "path", + "name": "address_lot", + "description": "Name or ID of the address lot", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/address-lot/{address_lot}/blocks": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List blocks in 
address lot", + "operationId": "networking_address_lot_block_list", + "parameters": [ + { + "in": "path", + "name": "address_lot", + "description": "Name or ID of the address lot", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddressLotBlockResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/networking/allow-list": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get user-facing services IP allowlist", + "operationId": "networking_allow_list_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowList" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/networking" + ], + "summary": "Update user-facing services IP allowlist", + "operationId": "networking_allow_list_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowListUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + 
"description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowList" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bfd-disable": { + "post": { + "tags": [ + "system/networking" + ], + "summary": "Disable a BFD session", + "operationId": "networking_bfd_disable", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BfdSessionDisable" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bfd-enable": { + "post": { + "tags": [ + "system/networking" + ], + "summary": "Enable a BFD session", + "operationId": "networking_bfd_enable", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BfdSessionEnable" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bfd-status": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get BFD status", + "operationId": "networking_bfd_status", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_BfdStatus", + "type": "array", + "items": { + "$ref": "#/components/schemas/BfdStatus" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List BGP configurations", + 
"operationId": "networking_bgp_config_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpConfigResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/networking" + ], + "summary": "Create new BGP configuration", + "operationId": "networking_bgp_config_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpConfigCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Delete BGP configuration", + "operationId": "networking_bgp_config_delete", + "parameters": [ + { + "in": "query", + "name": "name_or_id", + "description": "A name or id to use when selecting BGP config.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + 
"$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-announce-set": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List BGP announce sets", + "operationId": "networking_bgp_announce_set_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_BgpAnnounceSet", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnounceSet" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "put": { + "tags": [ + "system/networking" + ], + "summary": "Update BGP announce set", + "description": "If the announce set exists, this endpoint replaces the existing announce set with the one specified.", + "operationId": "networking_bgp_announce_set_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpAnnounceSetCreate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpAnnounceSet" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" 
+ } + } + } + }, + "/v1/system/networking/bgp-announce-set/{announce_set}": { + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Delete BGP announce set", + "operationId": "networking_bgp_announce_set_delete", + "parameters": [ + { + "in": "path", + "name": "announce_set", + "description": "Name or ID of the announce set", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-announce-set/{announce_set}/announcement": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get originated routes for a specified BGP announce set", + "operationId": "networking_bgp_announcement_list", + "parameters": [ + { + "in": "path", + "name": "announce_set", + "description": "Name or ID of the announce set", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_BgpAnnouncement", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnouncement" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-exported": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get BGP exported routes", + "operationId": "networking_bgp_exported", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BgpExported" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-message-history": 
{ + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get BGP router message history", + "operationId": "networking_bgp_message_history", + "parameters": [ + { + "in": "query", + "name": "asn", + "description": "The ASN to filter on. Required.", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AggregateBgpMessageHistory" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-routes-ipv4": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get imported IPv4 BGP routes", + "operationId": "networking_bgp_imported_routes_ipv4", + "parameters": [ + { + "in": "query", + "name": "asn", + "description": "The ASN to filter on. Required.", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_BgpImportedRouteIpv4", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpImportedRouteIpv4" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/bgp-status": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get BGP peer status", + "operationId": "networking_bgp_status", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_BgpPeerStatus", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerStatus" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + 
"$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/inbound-icmp": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Return whether API services can receive limited ICMP traffic", + "operationId": "networking_inbound_icmp_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ServiceIcmpConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/networking" + ], + "summary": "Set whether API services can receive limited ICMP traffic", + "operationId": "networking_inbound_icmp_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ServiceIcmpConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/loopback-address": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List loopback addresses", + "operationId": "networking_loopback_address_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/LoopbackAddressResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/networking" + ], + "summary": "Create loopback address", + "operationId": "networking_loopback_address_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoopbackAddressCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoopbackAddress" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}": { + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Delete loopback address", + "operationId": "networking_loopback_address_delete", + "parameters": [ + { + "in": "path", + "name": "address", + "description": "The IP address and subnet mask to use when selecting the loopback address.", + "required": true, + "schema": { + "type": "string", + "format": "ip" + } + }, + { + "in": "path", + "name": "rack_id", + "description": "The rack to use when selecting the loopback address.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "subnet_mask", + "description": "The IP address and subnet mask to use when selecting the loopback address.", + "required": true, + "schema": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + { + "in": "path", + "name": "switch_location", + "description": "The switch location to use when selecting the loopback address.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + 
"responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/switch-port-settings": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "List switch port settings", + "operationId": "networking_switch_port_settings_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "port_settings", + "description": "An optional name or id to use when selecting port settings.", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortSettingsIdentityResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/networking" + ], + "summary": "Create switch port settings", + "operationId": "networking_switch_port_settings_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortSettingsCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/SwitchPortSettings" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/networking" + ], + "summary": "Delete switch port settings", + "operationId": "networking_switch_port_settings_delete", + "parameters": [ + { + "in": "query", + "name": "port_settings", + "description": "An optional name or id to use when selecting port settings.", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/networking/switch-port-settings/{port}": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get information about switch port", + "operationId": "networking_switch_port_settings_view", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name or id to use when selecting switch port settings info objects.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPortSettings" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/policy": { + "get": { + "tags": [ + "policy" + ], + "summary": "Fetch top-level IAM policy", + "operationId": "system_policy_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FleetRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + 
"policy" + ], + "summary": "Update top-level IAM policy", + "operationId": "system_policy_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FleetRolePolicy" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FleetRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/scim/tokens": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List SCIM tokens", + "description": "Specify the silo by name or ID using the `silo` query parameter.", + "operationId": "scim_token_list", + "parameters": [ + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_ScimClientBearerToken", + "type": "array", + "items": { + "$ref": "#/components/schemas/ScimClientBearerToken" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "system/silos" + ], + "summary": "Create SCIM token", + "description": "Specify the silo by name or ID using the `silo` query parameter. Be sure to save the bearer token in the response. 
It will not be retrievable later through the token view and list endpoints.", + "operationId": "scim_token_create", + "parameters": [ + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScimClientBearerTokenValue" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/scim/tokens/{token_id}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch SCIM token", + "description": "Specify the silo by name or ID using the `silo` query parameter.", + "operationId": "scim_token_view", + "parameters": [ + { + "in": "path", + "name": "token_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScimClientBearerToken" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/silos" + ], + "summary": "Delete SCIM token", + "description": "Specify the silo by name or ID using the `silo` query parameter.", + "operationId": "scim_token_delete", + "parameters": [ + { + "in": "path", + "name": "token_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/silo-quotas": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Lists resource quotas for all silos", + "operationId": "system_quotas_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloQuotasResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/silos": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List silos", + "description": "Lists silos that are discoverable based on the current permissions.", + "operationId": "silo_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + 
"schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/silos" + ], + "summary": "Create a silo", + "operationId": "silo_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Silo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/silos/{silo}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch silo", + "description": "Fetch silo by name or ID.", + "operationId": "silo_view", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Silo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/silos" + ], + "summary": "Delete a silo", + "description": "Delete a silo by name or ID.", + "operationId": "silo_delete", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" 
+ } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/silos/{silo}/ip-pools": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List IP pools linked to silo", + "description": "Linked IP pools are available to users in the specified silo. A silo can have at most one default pool. IPs are allocated from the default pool when users ask for one without specifying a pool.", + "operationId": "silo_ip_pool_list", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloIpPoolResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/silos/{silo}/policy": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch silo IAM policy", + "operationId": "silo_policy_view", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/silos" + ], + "summary": "Update silo IAM policy", + "operationId": "silo_policy_update", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/silos/{silo}/quotas": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch resource quotas for silo", + "operationId": "silo_quotas_view", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloQuotas" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/silos" + ], + "summary": "Update resource quotas for silo", + "description": "If a quota value is not specified, it will remain unchanged.", + "operationId": 
"silo_quotas_update", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloQuotasUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloQuotas" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/timeseries/query": { + "post": { + "tags": [ + "system/metrics" + ], + "summary": "Run timeseries query", + "description": "Queries are written in OxQL.", + "operationId": "system_timeseries_query", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesQuery" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OxqlQueryResult" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/timeseries/schemas": { + "get": { + "tags": [ + "system/metrics" + ], + "summary": "List timeseries schemas", + "operationId": "system_timeseries_schema_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + 
"description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesSchemaResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/update/repositories": { + "get": { + "tags": [ + "system/update" + ], + "summary": "List all TUF repositories", + "description": "Returns a paginated list of all TUF repositories ordered by system version (newest first by default).", + "operationId": "system_update_repository_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/VersionSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TufRepoResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "put": { + "tags": [ + "system/update" + ], + "summary": "Upload system release repository", + "description": "System release repositories are verified by the updates trust store.", + "operationId": "system_update_repository_upload", + "parameters": [ + { + "in": "query", + "name": "file_name", + "description": "The name of the uploaded file.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + 
"application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TufRepoUpload" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/update/repositories/{system_version}": { + "get": { + "tags": [ + "system/update" + ], + "summary": "Fetch system release repository by version", + "operationId": "system_update_repository_view", + "parameters": [ + { + "in": "path", + "name": "system_version", + "description": "The version to get.", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TufRepo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/update/status": { + "get": { + "tags": [ + "system/update" + ], + "summary": "Fetch system update status", + "description": "Returns information about the current target release and the progress of system software updates.", + "operationId": "system_update_status", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/update/target-release": { + "put": { + "tags": [ + 
"system/update" + ], + "summary": "Set target release", + "description": "Set the current target release of the rack's system software. The rack reconfigurator will treat the software specified here as a goal state for the rack's software, and attempt to asynchronously update to that release. Use the update status endpoint to view the current target release.", + "operationId": "target_release_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SetTargetReleaseParams" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/update/trust-roots": { + "get": { + "tags": [ + "system/update" + ], + "summary": "List root roles in the updates trust store", + "description": "A root role is a JSON document describing the cryptographic keys that are trusted to sign system release repositories, as described by The Update Framework. 
Uploading a repository requires its metadata to be signed by keys trusted by the trust store.", + "operationId": "system_update_trust_root_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdatesTrustRootResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/update" + ], + "summary": "Add trusted root role to updates trust store", + "operationId": "system_update_trust_root_create", + "requestBody": { + "content": { + "application/json": { + "schema": {} + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdatesTrustRoot" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/update/trust-roots/{trust_root_id}": { + "get": { + "tags": [ + "system/update" + ], + "summary": "Fetch trusted root role", + "operationId": "system_update_trust_root_view", + "parameters": [ + { + "in": "path", + "name": "trust_root_id", + "description": "ID of the trust root", + "required": true, + "schema": { + "type": "string", + "format": "uuid" 
+ } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdatesTrustRoot" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/update" + ], + "summary": "Delete trusted root role", + "description": "Note that this method does not currently check for any uploaded system release repositories that would become untrusted after deleting the root role.", + "operationId": "system_update_trust_root_delete", + "parameters": [ + { + "in": "path", + "name": "trust_root_id", + "description": "ID of the trust root", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/users": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List built-in (system) users in silo", + "operationId": "silo_user_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/UserResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "silo" + ] + } + } + }, + "/v1/system/users/{user_id}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch built-in (system) user", + "operationId": "silo_user_view", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "The user's internal ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/users-builtin": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List built-in users", + "operationId": "user_builtin_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserBuiltinResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, 
+ "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/users-builtin/{user}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch built-in user", + "operationId": "user_builtin_view", + "parameters": [ + { + "in": "path", + "name": "user", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserBuiltin" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/system/utilization/silos": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "List current utilization state for all silos", + "operationId": "silo_utilization_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloUtilizationResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/system/utilization/silos/{silo}": { + "get": { + "tags": [ + "system/silos" + ], + "summary": "Fetch current utilization for given silo", + "operationId": 
"silo_utilization_view", + "parameters": [ + { + "in": "path", + "name": "silo", + "description": "Name or ID of the silo", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloUtilization" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/timeseries/query": { + "post": { + "tags": [ + "experimental" + ], + "summary": "Run project-scoped timeseries query", + "description": "Queries are written in OxQL. Project must be specified by name or ID in URL query parameter. The OxQL query will only return timeseries data from the specified project.", + "operationId": "timeseries_query", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesQuery" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OxqlQueryResult" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/users": { + "get": { + "tags": [ + "silos" + ], + "summary": "List users", + "operationId": "user_list", + "parameters": [ + { + "in": "query", + "name": "group", + "schema": { + "nullable": true, + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 
+ } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/users/{user_id}": { + "get": { + "tags": [ + "silos" + ], + "summary": "Fetch user", + "operationId": "user_view", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "ID of the user", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/users/{user_id}/access-tokens": { + "get": { + "tags": [ + "silos" + ], + "summary": "List user's access tokens", + "operationId": "user_token_list", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "ID of the user", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": 
"string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeviceAccessTokenResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/users/{user_id}/logout": { + "post": { + "tags": [ + "silos" + ], + "summary": "Log user out", + "description": "Silo admins can use this endpoint to log the specified user out by deleting all of their tokens AND sessions. This cannot be undone.", + "operationId": "user_logout", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "ID of the user", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/users/{user_id}/sessions": { + "get": { + "tags": [ + "silos" + ], + "summary": "List user's console sessions", + "operationId": "user_session_list", + "parameters": [ + { + "in": "path", + "name": "user_id", + "description": "ID of the user", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": 
"#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConsoleSessionResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/utilization": { + "get": { + "tags": [ + "silos" + ], + "summary": "Fetch resource utilization for user's current silo", + "operationId": "utilization_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Utilization" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-firewall-rules": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List firewall rules", + "operationId": "vpc_firewall_rules_view", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRules" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Replace firewall rules", + "description": "The maximum number of rules per VPC is 1024.\n\nTargets are used to specify the set of instances to which a firewall rule applies. 
You can target instances directly by name, or specify a VPC, VPC subnet, IP, or IP subnet, which will apply the rule to traffic going to all matching instances. Targets are additive: the rule applies to instances matching ANY target. The maximum number of targets is 256.\n\nFilters reduce the scope of a firewall rule. Without filters, the rule applies to all packets to the targets (or from the targets, if it's an outbound rule). With multiple filters, the rule applies only to packets matching ALL filters. The maximum number of each type of filter is 256.", + "operationId": "vpc_firewall_rules_update", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRuleUpdateParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRules" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-router-routes": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List routes", + "description": "List the routes associated with a router in a particular VPC.", + "operationId": "vpc_router_route_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token 
returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `router` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRouteResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "router" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create route", + "operationId": "vpc_router_route_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `router` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRouteCreate" + } 
+ } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRoute" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-router-routes/{route}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch route", + "operationId": "vpc_router_route_view", + "parameters": [ + { + "in": "path", + "name": "route", + "description": "Name or ID of the route", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `router` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRoute" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update route", + "operationId": "vpc_router_route_update", + "parameters": [ + { + "in": "path", + "name": "route", + "description": "Name or ID of the route", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `router` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRouteUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterRoute" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete route", + "operationId": "vpc_router_route_delete", + "parameters": [ + { + "in": "path", + "name": "route", + "description": "Name or ID of the route", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `router` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-routers": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List routers", + 
"operationId": "vpc_router_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcRouterResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "vpc" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create VPC router", + "operationId": "vpc_router_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcRouterCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": 
"successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcRouter" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-routers/{router}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch router", + "operationId": "vpc_router_view", + "parameters": [ + { + "in": "path", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcRouter" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update router", + "operationId": "vpc_router_update", + "parameters": [ + { + "in": "path", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/VpcRouterUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcRouter" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete router", + "operationId": "vpc_router_delete", + "parameters": [ + { + "in": "path", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-subnets": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List subnets", + "operationId": "vpc_subnet_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + 
"name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "vpc" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create subnet", + "operationId": "vpc_subnet_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-subnets/{subnet}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch subnet", + "operationId": "vpc_subnet_view", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required 
if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update subnet", + "operationId": "vpc_subnet_update", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete subnet", + "operationId": "vpc_subnet_delete", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + 
"description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpc-subnets/{subnet}/network-interfaces": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List network interfaces", + "operationId": "vpc_subnet_list_network_interfaces", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/vpcs": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List VPCs", + "operationId": "vpc_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create VPC", + "operationId": "vpc_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": 
"#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/vpcs/{vpc}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch VPC", + "operationId": "vpc_view", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update a VPC", + "operationId": "vpc_update", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete VPC", + "operationId": "vpc_delete", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhook-receivers": { + "post": { + "tags": [ + "system/alerts" + ], + "summary": "Create webhook receiver", + "operationId": "webhook_receiver_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiver" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhook-receivers/{receiver}": { + "put": { + "tags": [ + "system/alerts" + ], + "summary": "Update webhook receiver", + "description": "Note that receiver secrets are NOT added or removed using this endpoint. 
Instead, use the `/v1/webhook-secrets?receiver={receiver}` endpoint to add and remove secrets.", + "operationId": "webhook_receiver_update", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiverUpdate" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhook-secrets": { + "get": { + "tags": [ + "system/alerts" + ], + "summary": "List webhook receiver secret IDs", + "operationId": "webhook_secrets_list", + "parameters": [ + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecrets" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "system/alerts" + ], + "summary": "Add secret to webhook receiver", + "operationId": "webhook_secrets_add", + "parameters": [ + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecretCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecret" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhook-secrets/{secret_id}": { + "delete": { + "tags": [ + "system/alerts" + ], + "summary": "Remove secret from webhook receiver", + "operationId": "webhook_secrets_delete", + "parameters": [ + { + "in": "path", + "name": "secret_id", + "description": "ID of the secret.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "Address": { + "description": "An address tied to an address lot.", + "type": "object", + "properties": { + "address": { + "description": "The address and prefix length of this address.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "address_lot": { + "description": "The address lot this address is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "vlan_id": { + "nullable": true, + "description": "Optional VLAN ID for this address", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "address", + "address_lot" + ] + }, + "AddressConfig": { + "description": "A set of addresses associated with a port configuration.", + "type": "object", + "properties": { + "addresses": { + "description": "The set of addresses assigned to the port configuration.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Address" + } + }, + "link_name": { + "description": "Link to assign the addresses to. On ports that are not broken out, this is always phy0. 
On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "addresses", + "link_name" + ] + }, + "AddressLot": { + "description": "Represents an address lot object, containing the id of the lot that can be used in other API calls.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "kind": { + "description": "Desired use of `AddressLot`", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLotKind" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "kind", + "name", + "time_created", + "time_modified" + ] + }, + "AddressLotBlock": { + "description": "An address lot block is a part of an address lot and contains a range of addresses. 
The range is inclusive.", + "type": "object", + "properties": { + "first_address": { + "description": "The first address of the block (inclusive).", + "type": "string", + "format": "ip" + }, + "id": { + "description": "The id of the address lot block.", + "type": "string", + "format": "uuid" + }, + "last_address": { + "description": "The last address of the block (inclusive).", + "type": "string", + "format": "ip" + } + }, + "required": [ + "first_address", + "id", + "last_address" + ] + }, + "AddressLotBlockCreate": { + "description": "Parameters for creating an address lot block. First and last addresses are inclusive.", + "type": "object", + "properties": { + "first_address": { + "description": "The first address in the lot (inclusive).", + "type": "string", + "format": "ip" + }, + "last_address": { + "description": "The last address in the lot (inclusive).", + "type": "string", + "format": "ip" + } + }, + "required": [ + "first_address", + "last_address" + ] + }, + "AddressLotBlockResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AddressLotCreate": { + "description": "Parameters for creating an address lot.", + "type": "object", + "properties": { + "blocks": { + "description": "The blocks to add along with the new address lot.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlockCreate" + } + }, + "description": { + "type": "string" + }, + "kind": { + "description": "The kind of address lot to create.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLotKind" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + 
"blocks", + "description", + "kind", + "name" + ] + }, + "AddressLotCreateResponse": { + "description": "An address lot and associated blocks resulting from creating an address lot.", + "type": "object", + "properties": { + "blocks": { + "description": "The address lot blocks that were created.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "lot": { + "description": "The address lot that was created.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLot" + } + ] + } + }, + "required": [ + "blocks", + "lot" + ] + }, + "AddressLotKind": { + "description": "The kind associated with an address lot.", + "oneOf": [ + { + "description": "Infrastructure address lots are used for network infrastructure like addresses assigned to rack switches.", + "type": "string", + "enum": [ + "infra" + ] + }, + { + "description": "Pool address lots are used by IP pools.", + "type": "string", + "enum": [ + "pool" + ] + } + ] + }, + "AddressLotResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLot" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AddressLotViewResponse": { + "description": "An address lot and associated blocks resulting from viewing an address lot.", + "type": "object", + "properties": { + "blocks": { + "description": "The address lot blocks.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "lot": { + "description": "The address lot.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLot" + } + ] + } + }, + "required": [ + "blocks", + "lot" + ] + }, + "AffinityGroup": { + "description": "View of an Affinity Group", + "type": "object", + 
"properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "failure_domain": { + "$ref": "#/components/schemas/FailureDomain" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "policy": { + "$ref": "#/components/schemas/AffinityPolicy" + }, + "project_id": { + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "failure_domain", + "id", + "name", + "policy", + "project_id", + "time_created", + "time_modified" + ] + }, + "AffinityGroupCreate": { + "description": "Create-time parameters for an `AffinityGroup`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "failure_domain": { + "$ref": "#/components/schemas/FailureDomain" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "policy": { + "$ref": "#/components/schemas/AffinityPolicy" + } + }, + "required": [ + "description", + "failure_domain", + "name", + "policy" + ] + }, + "AffinityGroupMember": { + "description": "A member of an Affinity Group\n\nMembership in a group is not exclusive - members may belong to multiple affinity / anti-affinity groups.\n\nAffinity Groups can contain up to 32 members.", + "oneOf": [ + { + "description": "An instance belonging to this group\n\nInstances can belong to up to 16 affinity groups.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "type": "object", 
+ "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "run_state": { + "$ref": "#/components/schemas/InstanceState" + } + }, + "required": [ + "id", + "name", + "run_state" + ] + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "AffinityGroupMemberResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AffinityGroupMember" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AffinityGroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AffinityGroup" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AffinityGroupUpdate": { + "description": "Updateable properties of an `AffinityGroup`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "AffinityPolicy": { + "description": "Affinity policy used to describe \"what to do when a request cannot be satisfied\"\n\nUsed for both Affinity and Anti-Affinity Groups", + "oneOf": [ + { + "description": "If the affinity request cannot be satisfied, allow it anyway.\n\nThis enables a \"best-effort\" attempt to satisfy the affinity policy.", + "type": "string", + "enum": [ + "allow" + ] + }, + { + "description": "If the affinity request cannot be satisfied, fail 
explicitly.", + "type": "string", + "enum": [ + "fail" + ] + } + ] + }, + "AggregateBgpMessageHistory": { + "description": "BGP message history for rack switches.", + "type": "object", + "properties": { + "switch_histories": { + "description": "BGP history organized by switch.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchBgpHistory" + } + } + }, + "required": [ + "switch_histories" + ] + }, + "AlertClass": { + "description": "An alert class.", + "type": "object", + "properties": { + "description": { + "description": "A description of what this alert class represents.", + "type": "string" + }, + "name": { + "description": "The name of the alert class.", + "type": "string" + } + }, + "required": [ + "description", + "name" + ] + }, + "AlertClassResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertClass" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AlertDelivery": { + "description": "A delivery of a webhook event.", + "type": "object", + "properties": { + "alert_class": { + "description": "The event class.", + "type": "string" + }, + "alert_id": { + "description": "The UUID of the event.", + "type": "string", + "format": "uuid" + }, + "attempts": { + "description": "Individual attempts to deliver this webhook event, and their outcomes.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertDeliveryAttempts" + } + ] + }, + "id": { + "description": "The UUID of this delivery attempt.", + "type": "string", + "format": "uuid" + }, + "receiver_id": { + "description": "The UUID of the alert receiver that this event was delivered to.", + "type": "string", + "format": "uuid" + }, + "state": { + "description": "The state of this 
delivery.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertDeliveryState" + } + ] + }, + "time_started": { + "description": "The time at which this delivery began (i.e. the event was dispatched to the receiver).", + "type": "string", + "format": "date-time" + }, + "trigger": { + "description": "Why this delivery was performed.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertDeliveryTrigger" + } + ] + } + }, + "required": [ + "alert_class", + "alert_id", + "attempts", + "id", + "receiver_id", + "state", + "time_started", + "trigger" + ] + }, + "AlertDeliveryAttempts": { + "description": "A list of attempts to deliver an alert to a receiver.\n\nThe type of the delivery attempt model depends on the receiver type, as it may contain information specific to that delivery mechanism. For example, webhook delivery attempts contain the HTTP status code of the webhook request.", + "oneOf": [ + { + "description": "A list of attempts to deliver an alert to a webhook receiver.", + "type": "object", + "properties": { + "webhook": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookDeliveryAttempt" + } + } + }, + "required": [ + "webhook" + ], + "additionalProperties": false + } + ] + }, + "AlertDeliveryId": { + "type": "object", + "properties": { + "delivery_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "delivery_id" + ] + }, + "AlertDeliveryResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertDelivery" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AlertDeliveryState": { + "description": "The state of a webhook delivery attempt.", + "oneOf": [ + { + "description": "The webhook event has not yet 
been delivered successfully.\n\nEither no delivery attempts have yet been performed, or the delivery has failed at least once but has retries remaining.", + "type": "string", + "enum": [ + "pending" + ] + }, + { + "description": "The webhook event has been delivered successfully.", + "type": "string", + "enum": [ + "delivered" + ] + }, + { + "description": "The webhook delivery attempt has failed permanently and will not be retried again.", + "type": "string", + "enum": [ + "failed" + ] + } + ] + }, + "AlertDeliveryTrigger": { + "description": "The reason an alert was delivered", + "oneOf": [ + { + "description": "Delivery was triggered by the alert itself.", + "type": "string", + "enum": [ + "alert" + ] + }, + { + "description": "Delivery was triggered by a request to resend the alert.", + "type": "string", + "enum": [ + "resend" + ] + }, + { + "description": "This delivery is a liveness probe.", + "type": "string", + "enum": [ + "probe" + ] + } + ] + }, + "AlertProbeResult": { + "description": "Data describing the result of an alert receiver liveness probe attempt.", + "type": "object", + "properties": { + "probe": { + "description": "The outcome of the probe delivery.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertDelivery" + } + ] + }, + "resends_started": { + "nullable": true, + "description": "If the probe request succeeded, and resending failed deliveries on success was requested, the number of new delivery attempts started. 
Otherwise, if the probe did not succeed, or resending failed deliveries was not requested, this is null.\n\nNote that this may be 0, if there were no events found which had not been delivered successfully to this receiver.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "probe" + ] + }, + "AlertReceiver": { + "description": "The configuration for an alert receiver.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "kind": { + "description": "Configuration specific to the kind of alert receiver that this is.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertReceiverKind" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "subscriptions": { + "description": "The list of alert classes to which this receiver is subscribed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertSubscription" + } + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "kind", + "name", + "subscriptions", + "time_created", + "time_modified" + ] + }, + "AlertReceiverKind": { + "description": "The possible alert delivery mechanisms for an alert receiver.", + "oneOf": [ + { + "description": "Webhook-specific alert receiver configuration.", + "type": "object", + "properties": { + "endpoint": { + "description": "The URL that webhook notification requests are sent to.", + "type": "string", + "format": "uri" + }, + 
"kind": { + "type": "string", + "enum": [ + "webhook" + ] + }, + "secrets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookSecret" + } + } + }, + "required": [ + "endpoint", + "kind", + "secrets" + ] + } + ] + }, + "AlertReceiverResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertReceiver" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AlertSubscription": { + "title": "A webhook event class subscription", + "description": "A webhook event class subscription matches either a single event class exactly, or a glob pattern including wildcards that may match multiple event classes", + "type": "string", + "pattern": "^([a-zA-Z0-9_]+|\\*|\\*\\*)(\\.([a-zA-Z0-9_]+|\\*|\\*\\*))*$" + }, + "AlertSubscriptionCreate": { + "type": "object", + "properties": { + "subscription": { + "description": "The event class pattern to subscribe to.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertSubscription" + } + ] + } + }, + "required": [ + "subscription" + ] + }, + "AlertSubscriptionCreated": { + "type": "object", + "properties": { + "subscription": { + "description": "The new subscription added to the receiver.", + "allOf": [ + { + "$ref": "#/components/schemas/AlertSubscription" + } + ] + } + }, + "required": [ + "subscription" + ] + }, + "AllowList": { + "description": "Allowlist of IPs or subnets that can make requests to user-facing services.", + "type": "object", + "properties": { + "allowed_ips": { + "description": "The allowlist of IPs or subnets.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, + "time_created": { + "description": "Time the list was created.", + "type": "string", + "format": 
"date-time" + }, + "time_modified": { + "description": "Time the list was last modified.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "allowed_ips", + "time_created", + "time_modified" + ] + }, + "AllowListUpdate": { + "description": "Parameters for updating allowed source IPs", + "type": "object", + "properties": { + "allowed_ips": { + "description": "The new list of allowed source IPs.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + } + }, + "required": [ + "allowed_ips" + ] + }, + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": "Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + }, + "required": [ + "allow" + ] + }, + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, + "AntiAffinityGroup": { + "description": "View of an Anti-Affinity Group", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "failure_domain": { + "$ref": "#/components/schemas/FailureDomain" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "policy": { + "$ref": "#/components/schemas/AffinityPolicy" + }, + "project_id": { + "type": "string", + "format": "uuid" + }, + 
"time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "failure_domain", + "id", + "name", + "policy", + "project_id", + "time_created", + "time_modified" + ] + }, + "AntiAffinityGroupCreate": { + "description": "Create-time parameters for an `AntiAffinityGroup`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "failure_domain": { + "$ref": "#/components/schemas/FailureDomain" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "policy": { + "$ref": "#/components/schemas/AffinityPolicy" + } + }, + "required": [ + "description", + "failure_domain", + "name", + "policy" + ] + }, + "AntiAffinityGroupMember": { + "description": "A member of an Anti-Affinity Group\n\nMembership in a group is not exclusive - members may belong to multiple affinity / anti-affinity groups.\n\nAnti-Affinity Groups can contain up to 32 members.", + "oneOf": [ + { + "description": "An instance belonging to this group\n\nInstances can belong to up to 16 anti-affinity groups.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "run_state": { + "$ref": "#/components/schemas/InstanceState" + } + }, + "required": [ + "id", + "name", + "run_state" + ] + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "AntiAffinityGroupMemberResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AntiAffinityGroupMember" + } + 
}, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AntiAffinityGroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AntiAffinityGroup" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AntiAffinityGroupUpdate": { + "description": "Updateable properties of an `AntiAffinityGroup`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "AuditLogEntry": { + "description": "Audit log entry", + "type": "object", + "properties": { + "actor": { + "$ref": "#/components/schemas/AuditLogEntryActor" + }, + "auth_method": { + "nullable": true, + "description": "How the user authenticated the request. Possible values are \"session_cookie\" and \"access_token\". Optional because it will not be defined on unauthenticated requests like login attempts.", + "type": "string" + }, + "id": { + "description": "Unique identifier for the audit log entry", + "type": "string", + "format": "uuid" + }, + "operation_id": { + "description": "API endpoint ID, e.g., `project_create`", + "type": "string" + }, + "request_id": { + "description": "Request ID for tracing requests through the system", + "type": "string" + }, + "request_uri": { + "description": "URI of the request, truncated to 512 characters. Will only include host and scheme for HTTP/2 requests. 
For HTTP/1.1, the URI will consist of only the path and query.", + "type": "string" + }, + "result": { + "description": "Result of the operation", + "allOf": [ + { + "$ref": "#/components/schemas/AuditLogEntryResult" + } + ] + }, + "source_ip": { + "description": "IP address that made the request", + "type": "string", + "format": "ip" + }, + "time_completed": { + "description": "Time operation completed", + "type": "string", + "format": "date-time" + }, + "time_started": { + "description": "When the request was received", + "type": "string", + "format": "date-time" + }, + "user_agent": { + "nullable": true, + "description": "User agent string from the request, truncated to 256 characters.", + "type": "string" + } + }, + "required": [ + "actor", + "id", + "operation_id", + "request_id", + "request_uri", + "result", + "source_ip", + "time_completed", + "time_started" + ] + }, + "AuditLogEntryActor": { + "oneOf": [ + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "user_builtin" + ] + }, + "user_builtin_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "kind", + "user_builtin_id" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "silo_user" + ] + }, + "silo_id": { + "type": "string", + "format": "uuid" + }, + "silo_user_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "kind", + "silo_id", + "silo_user_id" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "scim" + ] + }, + "silo_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "kind", + "silo_id" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "unauthenticated" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "AuditLogEntryResult": { + "description": "Result of an audit log entry", + "oneOf": [ + { + "description": "The operation completed successfully", + "type": 
"object", + "properties": { + "http_status_code": { + "description": "HTTP status code", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "kind": { + "type": "string", + "enum": [ + "success" + ] + } + }, + "required": [ + "http_status_code", + "kind" + ] + }, + { + "description": "The operation failed", + "type": "object", + "properties": { + "error_code": { + "nullable": true, + "type": "string" + }, + "error_message": { + "type": "string" + }, + "http_status_code": { + "description": "HTTP status code", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "kind": { + "type": "string", + "enum": [ + "error" + ] + } + }, + "required": [ + "error_message", + "http_status_code", + "kind" + ] + }, + { + "description": "After the logged operation completed, our attempt to write the result to the audit log failed, so it was automatically marked completed later by a background job. This does not imply that the operation itself timed out or failed, only our attempts to log its result.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "AuditLogEntryResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/AuditLogEntry" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "AuthzScope": { + "description": "Authorization scope for a timeseries.\n\nThis describes the level at which a user must be authorized to read data from a timeseries. For example, fleet-scoping means the data is only visible to an operator or fleet reader. 
Project-scoped, on the other hand, indicates that a user will see data limited to the projects on which they have read permissions.", + "oneOf": [ + { + "description": "Timeseries data is limited to fleet readers.", + "type": "string", + "enum": [ + "fleet" + ] + }, + { + "description": "Timeseries data is limited to the authorized silo for a user.", + "type": "string", + "enum": [ + "silo" + ] + }, + { + "description": "Timeseries data is limited to the authorized projects for a user.", + "type": "string", + "enum": [ + "project" + ] + }, + { + "description": "The timeseries is viewable to all without limitation.", + "type": "string", + "enum": [ + "viewable_to_all" + ] + } + ] + }, + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "revision": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "revision", + "serial" + ] + }, + "BfdMode": { + "description": "BFD connection mode.", + "type": "string", + "enum": [ + "single_hop", + "multi_hop" + ] + }, + "BfdSessionDisable": { + "description": "Information needed to disable a BFD session", + "type": "object", + "properties": { + "remote": { + "description": "Address of the remote peer to disable a BFD session for.", + "type": "string", + "format": "ip" + }, + "switch": { + "description": "The switch to enable this session on. 
Must be `switch0` or `switch1`.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "remote", + "switch" + ] + }, + "BfdSessionEnable": { + "description": "Information about a bidirectional forwarding detection (BFD) session.", + "type": "object", + "properties": { + "detection_threshold": { + "description": "The negotiated Control packet transmission interval, multiplied by this variable, will be the Detection Time for this session (as seen by the remote system)", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "local": { + "nullable": true, + "description": "Address the Oxide switch will listen on for BFD traffic. If `None` then the unspecified address (0.0.0.0 or ::) is used.", + "type": "string", + "format": "ip" + }, + "mode": { + "description": "Select either single-hop (RFC 5881) or multi-hop (RFC 5883)", + "allOf": [ + { + "$ref": "#/components/schemas/BfdMode" + } + ] + }, + "remote": { + "description": "Address of the remote peer to establish a BFD session with.", + "type": "string", + "format": "ip" + }, + "required_rx": { + "description": "The minimum interval, in microseconds, between received BFD Control packets that this system requires", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "switch": { + "description": "The switch to enable this session on. Must be `switch0` or `switch1`.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "detection_threshold", + "mode", + "remote", + "required_rx", + "switch" + ] + }, + "BfdState": { + "oneOf": [ + { + "description": "A stable down state. 
Non-responsive to incoming messages.", + "type": "string", + "enum": [ + "admin_down" + ] + }, + { + "description": "The initial state.", + "type": "string", + "enum": [ + "down" + ] + }, + { + "description": "The peer has detected a remote peer in the down state.", + "type": "string", + "enum": [ + "init" + ] + }, + { + "description": "The peer has detected a remote peer in the up or init state while in the init state.", + "type": "string", + "enum": [ + "up" + ] + } + ] + }, + "BfdStatus": { + "type": "object", + "properties": { + "detection_threshold": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "local": { + "nullable": true, + "type": "string", + "format": "ip" + }, + "mode": { + "$ref": "#/components/schemas/BfdMode" + }, + "peer": { + "type": "string", + "format": "ip" + }, + "required_rx": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "state": { + "$ref": "#/components/schemas/BfdState" + }, + "switch": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "detection_threshold", + "mode", + "peer", + "required_rx", + "state", + "switch" + ] + }, + "BgpAnnounceSet": { + "description": "Represents a BGP announce set by id. 
The id can be used with other API calls to view and manage the announce set.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "BgpAnnounceSetCreate": { + "description": "Parameters for creating a named set of BGP announcements.", + "type": "object", + "properties": { + "announcement": { + "description": "The announcements in this set.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnouncementCreate" + } + }, + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "announcement", + "description", + "name" + ] + }, + "BgpAnnouncement": { + "description": "A BGP announcement tied to an address lot block.", + "type": "object", + "properties": { + "address_lot_block_id": { + "description": "The address block the IP network being announced is drawn from.", + "type": "string", + "format": "uuid" + }, + "announce_set_id": { + "description": "The id of the set this announcement is a part of.", + "type": "string", + "format": "uuid" + }, + "network": { + "description": "The IP network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + } + }, + "required": [ + "address_lot_block_id", + 
"announce_set_id", + "network" + ] + }, + "BgpAnnouncementCreate": { + "description": "A BGP announcement tied to a particular address lot block.", + "type": "object", + "properties": { + "address_lot_block": { + "description": "Address lot this announcement is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "network": { + "description": "The network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + } + }, + "required": [ + "address_lot_block", + "network" + ] + }, + "BgpConfig": { + "description": "A base BGP configuration.", + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "type": "string" + } + }, + "required": [ + "asn", + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "BgpConfigCreate": { + "description": "Parameters for creating a BGP configuration. 
This includes an autonomous system number (ASN) and a virtual routing and forwarding (VRF) identifier.", + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "bgp_announce_set_id": { + "$ref": "#/components/schemas/NameOrId" + }, + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "asn", + "bgp_announce_set_id", + "description", + "name" + ] + }, + "BgpConfigResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "BgpExported": { + "description": "The current status of a BGP peer.", + "type": "object", + "properties": { + "exports": { + "description": "Exported routes indexed by peer address.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Net" + } + } + } + }, + "required": [ + "exports" + ] + }, + "BgpImportedRouteIpv4": { + "description": "A route imported from a BGP peer.", + "type": "object", + "properties": { + "id": { + "description": "BGP identifier of the originating router.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "nexthop": { + "description": "The nexthop the prefix is reachable through.", + "type": "string", + "format": "ipv4" + }, + "prefix": { + "description": "The destination network prefix.", 
+ "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] + }, + "switch": { + "description": "Switch the route is imported into.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + } + }, + "required": [ + "id", + "nexthop", + "prefix", + "switch" + ] + }, + "BgpMessageHistory": {}, + "BgpPeer": { + "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", + "type": "object", + "properties": { + "addr": { + "description": "The address of the host to peer with.", + "type": "string", + "format": "ip" + }, + "allowed_export": { + "description": "Define export policy for a peer.", + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "bgp_config": { + "description": "The global BGP configuration used for establishing a session with this peer.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "connect_retry": { + "description": "How long to to wait between TCP connection retries (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "delay_open": { + "description": "How long to delay sending an open request after establishing a TCP session (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "type": "boolean" + }, + "hold_time": 
{ + "description": "How long to hold peer connections between keepalives (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "idle_hold_time": { + "description": "How long to hold a peer in idle before attempting a new session (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "interface_name": { + "description": "The name of interface to peer on. This is relative to the port configuration this BGP peer configuration is a part of. For example this value could be phy0 to refer to a primary physical interface. Or it could be vlan47 to refer to a VLAN interface.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "keepalive": { + "description": "How often to send keepalive requests (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a peer.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "addr", + "allowed_export", + "allowed_import", + "bgp_config", + "communities", + "connect_retry", + 
"delay_open", + "enforce_first_as", + "hold_time", + "idle_hold_time", + "interface_name", + "keepalive" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "link_name": { + "description": "Link that the peer is reachable on. On ports that are not broken out, this is always phy0. On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "peers": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeer" + } + } + }, + "required": [ + "link_name", + "peers" + ] + }, + "BgpPeerState": { + "description": "The current state of a BGP peer.", + "oneOf": [ + { + "description": "Initial state. Refuse all incoming BGP connections. No resources allocated to peer.", + "type": "string", + "enum": [ + "idle" + ] + }, + { + "description": "Waiting for the TCP connection to be completed.", + "type": "string", + "enum": [ + "connect" + ] + }, + { + "description": "Trying to acquire peer by listening for and accepting a TCP connection.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "Waiting for open message from peer.", + "type": "string", + "enum": [ + "open_sent" + ] + }, + { + "description": "Waiting for keepaliave or notification from peer.", + "type": "string", + "enum": [ + "open_confirm" + ] + }, + { + "description": "Synchronizing with peer.", + "type": "string", + "enum": [ + "session_setup" + ] + }, + { + "description": "Session established. 
Able to exchange update, notification and keepalive messages with peers.", + "type": "string", + "enum": [ + "established" + ] + } + ] + }, + "BgpPeerStatus": { + "description": "The current status of a BGP peer.", + "type": "object", + "properties": { + "addr": { + "description": "IP address of the peer.", + "type": "string", + "format": "ip" + }, + "local_asn": { + "description": "Local autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "description": "Remote autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "state": { + "description": "State of the peer.", + "allOf": [ + { + "$ref": "#/components/schemas/BgpPeerState" + } + ] + }, + "state_duration_millis": { + "description": "Time of last state change.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "switch": { + "description": "Switch with the peer session.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + } + }, + "required": [ + "addr", + "local_asn", + "remote_asn", + "state", + "state_duration_millis", + "switch" + ] + }, + "BinRangedouble": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "double" + }, + "start": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangefloat": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "float" + }, + "start": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int32" + }, + "start": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "start": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "Bindouble": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangedouble" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binfloat": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": 
"#/components/schemas/BinRangefloat" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + 
"Binuint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "BlockSize": { + "title": "disk block size in bytes", + "type": "integer", + "enum": [ + 512, + 2048, 
+ 4096 + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "description": "View of a Certificate", + "type": "object", + "properties": { + "cert": { + "description": "PEM-formatted string containing public certificate chain", + "type": "string" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "service": { + "description": "The service using this certificate", + "allOf": [ + { + "$ref": "#/components/schemas/ServiceUsingCertificate" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "cert", + "description", + "id", + "name", + "service", + "time_created", + "time_modified" + ] + }, + "CertificateCreate": { + "description": "Create-time parameters for a `Certificate`", + "type": "object", + "properties": { + "cert": { + "description": "PEM-formatted string containing public certificate chain", + "type": "string" + }, + "description": { + "type": "string" + }, + "key": { + "description": "PEM-formatted string containing private key", + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "service": { + "description": "The service using this certificate", + "allOf": [ + { + "$ref": "#/components/schemas/ServiceUsingCertificate" + } + ] + } + }, + "required": [ + "cert", + "description", + "key", + "name", + 
"service" + ] + }, + "CertificateResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Certificate" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "ConsoleSession": { + "description": "View of a console session", + "type": "object", + "properties": { + "id": { + "description": "A unique, immutable, system-controlled identifier for the session", + "type": "string", + "format": "uuid" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "time_last_used": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created", + "time_last_used" + ] + }, + "ConsoleSessionResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ConsoleSession" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Cumulativedouble": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "double" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativefloat": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "float" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeint64": { + 
"description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeuint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "start_time", + "value" + ] + }, + "CurrentUser": { + "description": "Info about the current user", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the user", + "type": "string" + }, + "fleet_viewer": { + "description": "Whether this user has the viewer role on the fleet. Used by the web console to determine whether to show system-level UI.", + "type": "boolean" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_admin": { + "description": "Whether this user has the admin role on their silo. 
Used by the web console to determine whether to show admin-only UI elements.", + "type": "boolean" + }, + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" + }, + "silo_name": { + "description": "Name of the silo to which this user belongs.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "display_name", + "fleet_viewer", + "id", + "silo_admin", + "silo_id", + "silo_name" + ] + }, + "Datum": { + "description": "A `Datum` is a single sampled data point from a metric.", + "oneOf": [ + { + "type": "object", + "properties": { + "datum": { + "type": "boolean" + }, + "type": { + "type": "string", + "enum": [ + "bool" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "i16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": 
{ + "type": "string", + "enum": [ + "u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "string" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "type": { + "type": "string", + "enum": [ + "bytes" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeuint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativefloat" + }, + "type": { 
+ "type": "string", + "enum": [ + "cumulative_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativedouble" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint16" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint16" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint32" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint32" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint64" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": 
"#/components/schemas/Histogramuint64" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramfloat" + }, + "type": { + "type": "string", + "enum": [ + "histogram_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramdouble" + }, + "type": { + "type": "string", + "enum": [ + "histogram_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/MissingDatum" + }, + "type": { + "type": "string", + "enum": [ + "missing" + ] + } + }, + "required": [ + "datum", + "type" + ] + } + ] + }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "bool", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "f32", + "f64", + "string", + "bytes", + "cumulative_i64", + "cumulative_u64", + "cumulative_f32", + "cumulative_f64", + "histogram_i8", + "histogram_u8", + "histogram_i16", + "histogram_u16", + "histogram_i32", + "histogram_u32", + "histogram_i64", + "histogram_u64", + "histogram_f32", + "histogram_f64" + ] + }, + "DerEncodedKeyPair": { + "type": "object", + "properties": { + "private_key": { + "description": "request signing RSA private key in PKCS#1 format (base64 encoded der file)", + "type": "string" + }, + "public_cert": { + "description": "request signing public certificate (base64 encoded der file)", + "type": "string" + } + }, + "required": [ + "private_key", + "public_cert" + ] + }, + "DeviceAccessToken": { + "description": "View of a device access token", + "type": "object", + "properties": { + "id": { + "description": "A unique, immutable, system-controlled identifier for the token. 
Note that this ID is not the bearer token itself, which starts with \"oxide-token-\"", + "type": "string", + "format": "uuid" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "time_expires": { + "nullable": true, + "description": "Expiration timestamp. A null value means the token does not automatically expire.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created" + ] + }, + "DeviceAccessTokenRequest": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "format": "uuid" + }, + "device_code": { + "type": "string" + }, + "grant_type": { + "type": "string" + } + }, + "required": [ + "client_id", + "device_code", + "grant_type" + ] + }, + "DeviceAccessTokenResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/DeviceAccessToken" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "DeviceAuthRequest": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "format": "uuid" + }, + "ttl_seconds": { + "nullable": true, + "description": "Optional lifetime for the access token in seconds.\n\nThis value will be validated during the confirmation step. If not specified, it defaults to the silo's max TTL, which can be seen at `/v1/auth-settings`. If specified, must not exceed the silo's max TTL.\n\nSome special logic applies when authenticating the confirmation request with an existing device token: the requested TTL must not produce an expiration time later than the authenticating token's expiration. If no TTL is specified, the expiration will be the lesser of the silo max and the authenticating token's expiration time. 
To get the longest allowed lifetime, omit the TTL and authenticate with a web console session.", + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + "required": [ + "client_id" + ] + }, + "DeviceAuthVerify": { + "type": "object", + "properties": { + "user_code": { + "type": "string" + } + }, + "required": [ + "user_code" + ] + }, + "Digest": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sha256" + ] + }, + "value": { + "type": "string" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "Disk": { + "description": "View of a Disk", + "type": "object", + "properties": { + "block_size": { + "$ref": "#/components/schemas/ByteCount" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "device_path": { + "type": "string" + }, + "disk_type": { + "$ref": "#/components/schemas/DiskType" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "image_id": { + "nullable": true, + "description": "ID of image from which disk was created, if any", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "type": "string", + "format": "uuid" + }, + "size": { + "$ref": "#/components/schemas/ByteCount" + }, + "snapshot_id": { + "nullable": true, + "description": "ID of snapshot from which disk was created, if any", + "type": "string", + "format": "uuid" + }, + "state": { + "$ref": "#/components/schemas/DiskState" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + 
}, + "required": [ + "block_size", + "description", + "device_path", + "disk_type", + "id", + "name", + "project_id", + "size", + "state", + "time_created", + "time_modified" + ] + }, + "DiskBackend": { + "description": "The source of a `Disk`'s blocks", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "local" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "disk_source": { + "description": "The initial source for this disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskSource" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "distributed" + ] + } + }, + "required": [ + "disk_source", + "type" + ] + } + ] + }, + "DiskCreate": { + "description": "Create-time parameters for a `Disk`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "disk_backend": { + "description": "The source for this `Disk`'s blocks", + "allOf": [ + { + "$ref": "#/components/schemas/DiskBackend" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "size": { + "description": "The total size of the Disk (in bytes)", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "description", + "disk_backend", + "name", + "size" + ] + }, + "DiskPath": { + "type": "object", + "properties": { + "disk": { + "description": "Name or ID of the disk", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "disk" + ] + }, + "DiskResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Disk" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "DiskSource": { + 
"description": "Different sources for a Distributed Disk", + "oneOf": [ + { + "description": "Create a blank disk", + "type": "object", + "properties": { + "block_size": { + "description": "size of blocks for this Disk. valid values are: 512, 2048, or 4096", + "allOf": [ + { + "$ref": "#/components/schemas/BlockSize" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "blank" + ] + } + }, + "required": [ + "block_size", + "type" + ] + }, + { + "description": "Create a disk from a disk snapshot", + "type": "object", + "properties": { + "snapshot_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "snapshot" + ] + } + }, + "required": [ + "snapshot_id", + "type" + ] + }, + { + "description": "Create a disk from an image", + "type": "object", + "properties": { + "image_id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "image" + ] + } + }, + "required": [ + "image_id", + "type" + ] + }, + { + "description": "Create a blank disk that will accept bulk writes or pull blocks from an external source.", + "type": "object", + "properties": { + "block_size": { + "$ref": "#/components/schemas/BlockSize" + }, + "type": { + "type": "string", + "enum": [ + "importing_blocks" + ] + } + }, + "required": [ + "block_size", + "type" + ] + } + ] + }, + "DiskState": { + "description": "State of a Disk", + "oneOf": [ + { + "description": "Disk is being initialized", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "creating" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready but detached from any Instance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "detached" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready to receive blocks from an external source", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + 
"import_ready" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from a URL", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_url" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from bulk writes", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_bulk_writes" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being finalized to state Detached", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "finalizing" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is undergoing maintenance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "maintenance" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attached" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is being detached from the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "detaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk has been destroyed", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "destroyed" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk 
is unavailable", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "faulted" + ] + } + }, + "required": [ + "state" + ] + } + ] + }, + "DiskType": { + "type": "string", + "enum": [ + "distributed", + "local" + ] + }, + "Distributiondouble": { + "description": "A distribution is a sequence of bins and counts in those bins, and some statistical information tracked to compute the mean, standard deviation, and quantile estimates.\n\nMin, max, and the p-* quantiles are treated as optional due to the possibility of distribution operations, like subtraction.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "type": "number", + "format": "double" + } + }, + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "max": { + "nullable": true, + "type": "number", + "format": "double" + }, + "min": { + "nullable": true, + "type": "number", + "format": "double" + }, + "p50": { + "nullable": true, + "type": "number", + "format": "double" + }, + "p90": { + "nullable": true, + "type": "number", + "format": "double" + }, + "p99": { + "nullable": true, + "type": "number", + "format": "double" + }, + "squared_mean": { + "type": "number", + "format": "double" + }, + "sum_of_samples": { + "type": "number", + "format": "double" + } + }, + "required": [ + "bins", + "counts", + "squared_mean", + "sum_of_samples" + ] + }, + "Distributionint64": { + "description": "A distribution is a sequence of bins and counts in those bins, and some statistical information tracked to compute the mean, standard deviation, and quantile estimates.\n\nMin, max, and the p-* quantiles are treated as optional due to the possibility of distribution operations, like subtraction.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": 
"uint64", + "minimum": 0 + } + }, + "max": { + "nullable": true, + "type": "integer", + "format": "int64" + }, + "min": { + "nullable": true, + "type": "integer", + "format": "int64" + }, + "p50": { + "nullable": true, + "type": "number", + "format": "double" + }, + "p90": { + "nullable": true, + "type": "number", + "format": "double" + }, + "p99": { + "nullable": true, + "type": "number", + "format": "double" + }, + "squared_mean": { + "type": "number", + "format": "double" + }, + "sum_of_samples": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "counts", + "squared_mean", + "sum_of_samples" + ] + }, + "EphemeralIpCreate": { + "description": "Parameters for creating an ephemeral IP address for an instance.", + "type": "object", + "properties": { + "pool": { + "nullable": true, + "description": "Name or ID of the IP pool used to allocate an address. If unspecified, the default IP pool will be used.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + } + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "ExternalIp": { + "oneOf": [ + { + "description": "A source NAT IP address.\n\nSNAT addresses are ephemeral addresses used only for outbound connectivity.", + "type": "object", + "properties": { + "first_port": { + "description": "The first usable port within the IP address.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "description": "The IP address.", + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "description": "ID of the IP Pool from which the address is taken.", + "type": "string", + "format": "uuid" + }, + "kind": { + "type": "string", + "enum": [ + "snat" + ] + }, + "last_port": { + "description": "The last usable port 
within the IP address.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "ip_pool_id", + "kind", + "last_port" + ] + }, + { + "type": "object", + "properties": { + "ip": { + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "type": "string", + "format": "uuid" + }, + "kind": { + "type": "string", + "enum": [ + "ephemeral" + ] + } + }, + "required": [ + "ip", + "ip_pool_id", + "kind" + ] + }, + { + "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { + "nullable": true, + "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", + "type": "string", + "format": "uuid" + }, + "ip": { + "description": "The IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, + "kind": { + "type": "string", + "enum": [ + "floating" + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "description": "The project this resource exists within.", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "ip", + "ip_pool_id", + "kind", + 
"name", + "project_id", + "time_created", + "time_modified" + ] + } + ] + }, + "ExternalIpCreate": { + "description": "Parameters for creating an external IP address for instances.", + "oneOf": [ + { + "description": "An IP address providing both inbound and outbound access. The address is automatically assigned from the provided IP pool or the default IP pool if not specified.", + "type": "object", + "properties": { + "pool": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "ephemeral" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "An IP address providing both inbound and outbound access. The address is an existing floating IP object assigned to the current project.\n\nThe floating IP must not be in use by another instance or service.", + "type": "object", + "properties": { + "floating_ip": { + "$ref": "#/components/schemas/NameOrId" + }, + "type": { + "type": "string", + "enum": [ + "floating" + ] + } + }, + "required": [ + "floating_ip", + "type" + ] + } + ] + }, + "ExternalIpResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ExternalIp" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "FailureDomain": { + "description": "Describes the scope of affinity for the purposes of co-location.", + "oneOf": [ + { + "description": "Instances are considered co-located if they are on the same sled", + "type": "string", + "enum": [ + "sled" + ] + } + ] + }, + "FieldSchema": { + "description": "The name and type information for a field of a timeseries schema.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + 
"field_type": { + "$ref": "#/components/schemas/FieldType" + }, + "name": { + "type": "string" + }, + "source": { + "$ref": "#/components/schemas/FieldSource" + } + }, + "required": [ + "description", + "field_type", + "name", + "source" + ] + }, + "FieldSource": { + "description": "The source from which a field is derived, the target or metric.", + "type": "string", + "enum": [ + "target", + "metric" + ] + }, + "FieldType": { + "description": "The `FieldType` identifies the data type of a target or metric field.", + "type": "string", + "enum": [ + "string", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "ip_addr", + "uuid", + "bool" + ] + }, + "FieldValue": { + "description": "The `FieldValue` contains the value of a target or metric field.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "string" + ] + }, + "value": { + "type": "string" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i8" + ] + }, + "value": { + "type": "integer", + "format": "int8" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u8" + ] + }, + "value": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i16" + ] + }, + "value": { + "type": "integer", + "format": "int16" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u16" + ] + }, + "value": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i32" + ] + }, + "value": { + "type": "integer", + 
"format": "int32" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u32" + ] + }, + "value": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i64" + ] + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u64" + ] + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_addr" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "uuid" + ] + }, + "value": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "bool" + ] + }, + "value": { + "type": "boolean" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "FinalizeDisk": { + "description": "Parameters for finalizing a disk", + "type": "object", + "properties": { + "snapshot_name": { + "nullable": true, + "description": "If specified a snapshot of the disk will be created with the given name during finalization. If not specified, a snapshot for the disk will _not_ be created. 
A snapshot can be manually created once the disk transitions into the `Detached` state.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "FleetRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "viewer" + ] + }, + "FleetRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", + "type": "object", + "properties": { + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "FleetRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { + "type": "string", + "format": "uuid" + }, + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/FleetRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "FloatingIp": { + "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { + "nullable": true, + "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", + "type": 
"string", + "format": "uuid" + }, + "ip": { + "description": "The IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "description": "The project this resource exists within.", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "ip", + "ip_pool_id", + "name", + "project_id", + "time_created", + "time_modified" + ] + }, + "FloatingIpAttach": { + "description": "Parameters for attaching a floating IP address to another resource", + "type": "object", + "properties": { + "kind": { + "description": "The type of `parent`'s resource", + "allOf": [ + { + "$ref": "#/components/schemas/FloatingIpParentKind" + } + ] + }, + "parent": { + "description": "Name or ID of the resource that this IP address should be attached to", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "kind", + "parent" + ] + }, + "FloatingIpCreate": { + "description": "Parameters for creating a new floating IP address for instances.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "ip": { + "nullable": true, + "description": "An IP address to reserve for use as a floating IP. This field is optional: when not set, an address will be automatically chosen from `pool`. 
If set, then the IP must be available in the resolved `pool`.", + "type": "string", + "format": "ip" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "pool": { + "nullable": true, + "description": "The parent IP pool that a floating IP is pulled from. If unset, the default pool is selected.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "description", + "name" + ] + }, + "FloatingIpParentKind": { + "description": "The type of resource that a floating IP is attached to", + "type": "string", + "enum": [ + "instance" + ] + }, + "FloatingIpResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/FloatingIp" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "FloatingIpUpdate": { + "description": "Updateable identity-related parameters", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "Group": { + "description": "View of a Group", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the group", + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this group belongs", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "display_name", + "id", + "silo_id" + ] + }, + "GroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": 
"#/components/schemas/Group" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Histogramdouble": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Bindouble" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "number", + "format": "double" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "number", + "format": "double" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": 
"string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "number", + "format": "double" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramfloat": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binfloat" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "number", + "format": "float" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "number", + "format": "float" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": 
"double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "number", + "format": "double" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint16" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int16" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int16" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's 
algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint32" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int32" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int32" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": 
{ + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint64" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int64" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int64" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 
Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint8" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int8" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int8" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": 
[ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramuint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint16" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p90 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramuint32": { + "description": "Histogram metric\n\nA histogram
maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint32" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + 
"squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramuint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint64" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + 
"type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramuint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint8" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + 
"type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Hostname": { + "title": "An RFC-1035-compliant hostname", + "description": "A hostname identifies a host on a network, and is usually a dot-delimited sequence of labels, where each label contains only letters, digits, or the hyphen. See RFCs 1035 and 952 for more details.", + "type": "string", + "pattern": "^([a-zA-Z0-9]+[a-zA-Z0-9\\-]*(? 2**53 addresses), integer precision will be lost, in exchange for representing the entire range. In such a case the pool still has many available addresses.", + "type": "object", + "properties": { + "capacity": { + "description": "The total number of addresses in the pool.", + "type": "number", + "format": "double" + }, + "remaining": { + "description": "The number of remaining addresses in the pool.", + "type": "number", + "format": "double" + } + }, + "required": [ + "capacity", + "remaining" + ] + }, + "IpRange": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Range" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Range" + } + ] + } + ] + }, + "IpVersion": { + "description": "The IP address version.", + "type": "string", + "enum": [ + "v4", + "v6" + ] + }, + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + }, + "Ipv4Range": { + "description": "A non-decreasing IPv4 
address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", + "type": "object", + "properties": { + "first": { + "type": "string", + "format": "ipv4" + }, + "last": { + "type": "string", + "format": "ipv4" + } + }, + "required": [ + "first", + "last" + ] + }, + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + }, + "Ipv6Range": { + "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", + "type": "object", + "properties": { + "first": { + "type": "string", + "format": "ipv6" + }, + "last": { + "type": "string", + "format": "ipv6" + } + }, + "required": [ + "first", + "last" + ] + }, + "L4PortRange": { + "example": "22", + "title": "A range of IP ports", + "description": "An inclusive-inclusive range of IP ports. 
The second port may be omitted to represent a single port.", + "type": "string", + "pattern": "^[0-9]{1,5}(-[0-9]{1,5})?$", + "minLength": 1, + "maxLength": 11 + }, + "LinkConfigCreate": { + "description": "Switch link configuration.", + "type": "object", + "properties": { + "autoneg": { + "description": "Whether or not to set autonegotiation.", + "type": "boolean" + }, + "fec": { + "nullable": true, + "description": "The requested forward-error correction method. If this is not specified, the standard FEC for the underlying media will be applied if it can be determined.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkFec" + } + ] + }, + "link_name": { + "description": "Link name. On ports that are not broken out, this is always phy0. On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "lldp": { + "description": "The link-layer discovery protocol (LLDP) configuration for the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LldpLinkConfigCreate" + } + ] + }, + "mtu": { + "description": "Maximum transmission unit for the link.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "speed": { + "description": "The speed of the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkSpeed" + } + ] + }, + "tx_eq": { + "nullable": true, + "description": "Optional tx_eq settings.", + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig" + } + ] + } + }, + "required": [ + "autoneg", + "link_name", + "lldp", + "mtu", + "speed" + ] + }, + "LinkFec": { + "description": "The forward error correction mode of a link.", + "oneOf": [ + { + "description": "Firecode forward error correction.", + "type": "string", + "enum": [ + "firecode" + ] + }, + { + "description": "No forward error correction.", + "type": "string", + "enum": [ + "none" + ] + }, + { + "description": "Reed-Solomon forward error correction.", + "type": "string", + "enum": [ + "rs" + ] + } + 
] + }, + "LinkSpeed": { + "description": "The speed of a link.", + "oneOf": [ + { + "description": "Zero gigabits per second.", + "type": "string", + "enum": [ + "speed0_g" + ] + }, + { + "description": "1 gigabit per second.", + "type": "string", + "enum": [ + "speed1_g" + ] + }, + { + "description": "10 gigabits per second.", + "type": "string", + "enum": [ + "speed10_g" + ] + }, + { + "description": "25 gigabits per second.", + "type": "string", + "enum": [ + "speed25_g" + ] + }, + { + "description": "40 gigabits per second.", + "type": "string", + "enum": [ + "speed40_g" + ] + }, + { + "description": "50 gigabits per second.", + "type": "string", + "enum": [ + "speed50_g" + ] + }, + { + "description": "100 gigabits per second.", + "type": "string", + "enum": [ + "speed100_g" + ] + }, + { + "description": "200 gigabits per second.", + "type": "string", + "enum": [ + "speed200_g" + ] + }, + { + "description": "400 gigabits per second.", + "type": "string", + "enum": [ + "speed400_g" + ] + } + ] + }, + "LldpLinkConfig": { + "description": "A link layer discovery protocol (LLDP) service configuration.", + "type": "object", + "properties": { + "chassis_id": { + "nullable": true, + "description": "The LLDP chassis identifier TLV.", + "type": "string" + }, + "enabled": { + "description": "Whether or not the LLDP service is enabled.", + "type": "boolean" + }, + "id": { + "description": "The id of this LLDP service instance.", + "type": "string", + "format": "uuid" + }, + "link_description": { + "nullable": true, + "description": "The LLDP link description TLV.", + "type": "string" + }, + "link_name": { + "nullable": true, + "description": "The LLDP link name TLV.", + "type": "string" + }, + "management_ip": { + "nullable": true, + "description": "The LLDP management IP TLV.", + "type": "string", + "format": "ip" + }, + "system_description": { + "nullable": true, + "description": "The LLDP system description TLV.", + "type": "string" + }, + "system_name": { + 
"nullable": true, + "description": "The LLDP system name TLV.", + "type": "string" + } + }, + "required": [ + "enabled", + "id" + ] + }, + "LldpLinkConfigCreate": { + "description": "The LLDP configuration associated with a port.", + "type": "object", + "properties": { + "chassis_id": { + "nullable": true, + "description": "The LLDP chassis identifier TLV.", + "type": "string" + }, + "enabled": { + "description": "Whether or not LLDP is enabled.", + "type": "boolean" + }, + "link_description": { + "nullable": true, + "description": "The LLDP link description TLV.", + "type": "string" + }, + "link_name": { + "nullable": true, + "description": "The LLDP link name TLV.", + "type": "string" + }, + "management_ip": { + "nullable": true, + "description": "The LLDP management IP TLV.", + "type": "string", + "format": "ip" + }, + "system_description": { + "nullable": true, + "description": "The LLDP system description TLV.", + "type": "string" + }, + "system_name": { + "nullable": true, + "description": "The LLDP system name TLV.", + "type": "string" + } + }, + "required": [ + "enabled" + ] + }, + "LldpNeighbor": { + "description": "Information about LLDP advertisements from other network entities directly connected to a switch port. 
This structure contains both metadata about when and where the neighbor was seen, as well as the specific information the neighbor was advertising.", + "type": "object", + "properties": { + "chassis_id": { + "description": "The LLDP chassis identifier advertised by the neighbor", + "type": "string" + }, + "first_seen": { + "description": "Initial sighting of this LldpNeighbor", + "type": "string", + "format": "date-time" + }, + "last_seen": { + "description": "Most recent sighting of this LldpNeighbor", + "type": "string", + "format": "date-time" + }, + "link_description": { + "nullable": true, + "description": "The LLDP link description advertised by the neighbor", + "type": "string" + }, + "link_name": { + "description": "The LLDP link name advertised by the neighbor", + "type": "string" + }, + "local_port": { + "description": "The port on which the neighbor was seen", + "type": "string" + }, + "management_ip": { + "description": "The LLDP management IP(s) advertised by the neighbor", + "type": "array", + "items": { + "$ref": "#/components/schemas/ManagementAddress" + } + }, + "system_description": { + "nullable": true, + "description": "The LLDP system description advertised by the neighbor", + "type": "string" + }, + "system_name": { + "nullable": true, + "description": "The LLDP system name advertised by the neighbor", + "type": "string" + } + }, + "required": [ + "chassis_id", + "first_seen", + "last_seen", + "link_name", + "local_port", + "management_ip" + ] + }, + "LldpNeighborResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/LldpNeighbor" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "LoopbackAddress": { + "description": "A loopback 
address is an address that is assigned to a rack switch but is not associated with any particular port.", + "type": "object", + "properties": { + "address": { + "description": "The loopback IP address and prefix length.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "address_lot_block_id": { + "description": "The address lot block this address came from.", + "type": "string", + "format": "uuid" + }, + "id": { + "description": "The id of the loopback address.", + "type": "string", + "format": "uuid" + }, + "rack_id": { + "description": "The id of the rack where this loopback address is assigned.", + "type": "string", + "format": "uuid" + }, + "switch_location": { + "description": "Switch location where this loopback address is assigned.", + "type": "string" + } + }, + "required": [ + "address", + "address_lot_block_id", + "id", + "rack_id", + "switch_location" + ] + }, + "LoopbackAddressCreate": { + "description": "Parameters for creating a loopback address on a particular rack switch.", + "type": "object", + "properties": { + "address": { + "description": "The address to create.", + "type": "string", + "format": "ip" + }, + "address_lot": { + "description": "The name or id of the address lot this loopback address will pull an address from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "anycast": { + "description": "Address is an anycast address. 
This allows the address to be assigned to multiple locations simultaneously.", + "type": "boolean" + }, + "mask": { + "description": "The subnet mask to use for the address.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "rack_id": { + "description": "The rack containing the switch this loopback address will be configured on.", + "type": "string", + "format": "uuid" + }, + "switch_location": { + "description": "The location of the switch within the rack this loopback address will be configured on.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "address", + "address_lot", + "anycast", + "mask", + "rack_id", + "switch_location" + ] + }, + "LoopbackAddressResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoopbackAddress" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "MacAddr": { + "example": "ff:ff:ff:ff:ff:ff", + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", + "minLength": 5, + "maxLength": 17 + }, + "ManagementAddress": { + "type": "object", + "properties": { + "addr": { + "$ref": "#/components/schemas/NetworkAddress" + }, + "interface_num": { + "$ref": "#/components/schemas/InterfaceNum" + }, + "oid": { + "nullable": true, + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + } + }, + "required": [ + "addr", + "interface_num" + ] + }, + "Measurement": { + "description": "A `Measurement` is a timestamped datum from a single metric", + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Datum" + 
}, + "timestamp": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "datum", + "timestamp" + ] + }, + "MeasurementResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Measurement" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "MetricType": { + "description": "The type of the metric itself, indicating what its values represent.", + "oneOf": [ + { + "description": "The value represents an instantaneous measurement in time.", + "type": "string", + "enum": [ + "gauge" + ] + }, + { + "description": "The value represents a difference between two points in time.", + "type": "string", + "enum": [ + "delta" + ] + }, + { + "description": "The value represents an accumulation between two points in time.", + "type": "string", + "enum": [ + "cumulative" + ] + } + ] + }, + "MissingDatum": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "start_time": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + "required": [ + "datum_type" + ] + }, + "MulticastGroup": { + "description": "View of a Multicast Group", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, + "multicast_ip": { + "description": "The multicast IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "mvlan": { + 
"nullable": true, + "description": "Multicast VLAN (MVLAN) for egress multicast traffic to upstream networks. None means no VLAN tagging on egress.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "source_ips": { + "description": "Source IP addresses for Source-Specific Multicast (SSM). Empty array means any source is allowed.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "state": { + "description": "Current state of the multicast group.", + "type": "string" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "ip_pool_id", + "multicast_ip", + "name", + "source_ips", + "state", + "time_created", + "time_modified" + ] + }, + "MulticastGroupIdentifier": { + "title": "A multicast group identifier", + "description": "Can be a UUID, a name, or an IP address", + "type": "string" + }, + "MulticastGroupMember": { + "description": "View of a Multicast Group Member (instance belonging to a multicast group)", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { + "description": "The ID of the instance that is a member of this group.", + "type": "string", + "format": "uuid" + }, + "multicast_group_id": { + "description": "The ID of the multicast group this member belongs to.", + "type": "string", + "format": "uuid" + }, + "multicast_ip": { + 
"description": "The multicast IP address of the group this member belongs to.", + "type": "string", + "format": "ip" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "state": { + "description": "Current state of the multicast group membership.", + "type": "string" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "instance_id", + "multicast_group_id", + "multicast_ip", + "name", + "state", + "time_created", + "time_modified" + ] + }, + "MulticastGroupMemberAdd": { + "description": "Parameters for adding an instance to a multicast group.", + "type": "object", + "properties": { + "instance": { + "description": "Name or ID of the instance to add to the multicast group", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "source_ips": { + "nullable": true, + "description": "Optional Source IP addresses for Source-Specific Multicast (SSM).\n\nIf the group already exists: - If `source_ips` is specified, validates they match the group's sources (no implicit update is performed).\n\nIf the group doesn't exist (implicit creation): - If `source_ips` is specified and non-empty, attempts to create an SSM group using these sources. 
- If omitted or empty, creates an ASM group.", + "default": null, + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + } + }, + "required": [ + "instance" + ] + }, + "MulticastGroupMemberResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/MulticastGroupMember" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "MulticastGroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/MulticastGroup" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "NameOrId": { + "oneOf": [ + { + "title": "id", + "allOf": [ + { + "type": "string", + "format": "uuid" + } + ] + }, + { + "title": "name", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + ] + }, + "NetworkAddress": { + "oneOf": [ + { + "type": "object", + "properties": { + "ip_addr": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "ip_addr" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "i_e_e_e802": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + } + }, + "required": [ + "i_e_e_e802" + ], + "additionalProperties": false + } + ] + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ] + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "properties": { + "id": { + 
"type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, + "OxqlQueryResult": { + "description": "The result of a successful OxQL query.", + "type": "object", + "properties": { + "tables": { + "description": "Tables resulting from the query, each containing timeseries.", + "type": "array", + "items": { + "$ref": "#/components/schemas/OxqlTable" + } + } + }, + "required": [ + "tables" + ] + }, + "OxqlTable": { + "description": "A table represents one or more timeseries with the same schema.\n\nA table is the result of an OxQL query. 
It contains a name, usually the name of the timeseries schema from which the data is derived, and any number of timeseries, which contain the actual data.", + "type": "object", + "properties": { + "name": { + "description": "The name of the table.", + "type": "string" + }, + "timeseries": { + "description": "The set of timeseries in the table, ordered by key.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Timeseries" + } + } + }, + "required": [ + "name", + "timeseries" + ] + }, + "Password": { + "title": "A password used to authenticate a user", + "description": "Passwords may be subject to additional constraints.", + "type": "string", + "maxLength": 512 + }, + "PhysicalDisk": { + "description": "View of a Physical Disk\n\nPhysical disks reside in a particular sled and are used to store both Instance Disk data as well as internal metadata.", + "type": "object", + "properties": { + "form_factor": { + "$ref": "#/components/schemas/PhysicalDiskKind" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "model": { + "type": "string" + }, + "policy": { + "description": "The operator-defined policy for a physical disk.", + "allOf": [ + { + "$ref": "#/components/schemas/PhysicalDiskPolicy" + } + ] + }, + "serial": { + "type": "string" + }, + "sled_id": { + "nullable": true, + "description": "The sled to which this disk is attached, if any.", + "type": "string", + "format": "uuid" + }, + "state": { + "description": "The current state Nexus believes the disk to be in.", + "allOf": [ + { + "$ref": "#/components/schemas/PhysicalDiskState" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vendor": { + "type": "string" + } + }, + 
"required": [ + "form_factor", + "id", + "model", + "policy", + "serial", + "state", + "time_created", + "time_modified", + "vendor" + ] + }, + "PhysicalDiskKind": { + "description": "Describes the form factor of physical disks.", + "type": "string", + "enum": [ + "m2", + "u2" + ] + }, + "PhysicalDiskPolicy": { + "description": "The operator-defined policy of a physical disk.", + "oneOf": [ + { + "description": "The operator has indicated that the disk is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The operator has indicated that the disk has been permanently removed from service.\n\nThis is a terminal state: once a particular disk ID is expunged, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new disk.)\n\nAn expunged disk is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "PhysicalDiskResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/PhysicalDisk" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "PhysicalDiskState": { + "description": "The current state of the disk, as determined by Nexus.", + "oneOf": [ + { + "description": "The disk is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The disk has been permanently removed from service.\n\nThis is a terminal state: once a particular disk ID is decommissioned, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new disk.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, + "Ping": { + "type": "object", + "properties": { + "status": { + "description": "Whether the external API is reachable. Will always be Ok if the endpoint returns anything at all.", + "allOf": [ + { + "$ref": "#/components/schemas/PingStatus" + } + ] + } + }, + "required": [ + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + }, + "Points": { + "description": "Timepoints and values for one timeseries.", + "type": "object", + "properties": { + "start_times": { + "nullable": true, + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } + }, + "timestamps": { + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } + }, + "values": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Values" + } + } + }, + "required": [ + "timestamps", + "values" + ] + }, + "Probe": { + "description": "Identity-related metadata that's included in nearly all public API objects", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "sled": { + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "sled", + "time_created", + "time_modified" + ] + }, + 
"ProbeCreate": { + "description": "Create time parameters for probes.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "ip_pool": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "sled": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "description", + "name", + "sled" + ] + }, + "ProbeExternalIp": { + "type": "object", + "properties": { + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/ProbeExternalIpKind" + }, + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "kind", + "last_port" + ] + }, + "ProbeExternalIpKind": { + "type": "string", + "enum": [ + "snat", + "floating", + "ephemeral" + ] + }, + "ProbeInfo": { + "type": "object", + "properties": { + "external_ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeExternalIp" + } + }, + "id": { + "type": "string", + "format": "uuid" + }, + "interface": { + "$ref": "#/components/schemas/NetworkInterface" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "sled": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "external_ips", + "id", + "interface", + "name", + "sled" + ] + }, + "ProbeInfoResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeInfo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Project": { + "description": "View of a Project", + "type": "object", + "properties": { + 
"description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "ProjectCreate": { + "description": "Create-time parameters for a `Project`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "name" + ] + }, + "ProjectResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Project" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "ProjectRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "limited_collaborator", + "viewer" + ] + }, + "ProjectRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. 
The policies of parent resources can also cause a user to have access to this resource.", + "type": "object", + "properties": { + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProjectRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "ProjectRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { + "type": "string", + "format": "uuid" + }, + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/ProjectRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "ProjectUpdate": { + "description": "Updateable properties of a `Project`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "Quantile": { + "description": "Structure for estimating the p-quantile of a population.\n\nThis is based on the P² algorithm for estimating quantiles using constant space.\n\nThe algorithm consists of maintaining five markers: the minimum, the p/2-, p-, and (1 + p)/2 quantiles, and the maximum.", + "type": "object", + "properties": { + "desired_marker_positions": { + "description": "The desired marker positions.", + "type": "array", + "items": { + "type": "number", + "format": "double" + }, + "minItems": 5, + "maxItems": 5 + }, + "marker_heights": { + "description": "The heights of the markers.", + "type": "array", + "items": { + "type": "number", + "format": "double" + }, + "minItems": 5, + 
"maxItems": 5 + }, + "marker_positions": { + "description": "The positions of the markers.\n\nWe track sample size in the 5th position, as useful observations won't start until we've filled the heights at the 6th sample anyway. This does deviate from the paper, but it's a more useful representation that works according to the paper's algorithm.", + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "minItems": 5, + "maxItems": 5 + }, + "p": { + "description": "The p value for the quantile.", + "type": "number", + "format": "double" + } + }, + "required": [ + "desired_marker_positions", + "marker_heights", + "marker_positions", + "p" + ] + }, + "Rack": { + "description": "View of a Rack", + "type": "object", + "properties": { + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created", + "time_modified" + ] + }, + "RackResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Rack" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Route": { + "description": "A route to a destination network through a gateway address.", + "type": "object", + "properties": { + "dst": { + "description": "The route destination.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "gw": { + "description": "The route gateway.", + 
"type": "string", + "format": "ip" + }, + "rib_priority": { + "nullable": true, + "description": "Route RIB priority. Higher priority indicates precedence within and across protocols.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vid": { + "nullable": true, + "description": "VLAN id the gateway is reachable over.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "dst", + "gw" + ] + }, + "RouteConfig": { + "description": "Route configuration data associated with a switch port configuration.", + "type": "object", + "properties": { + "link_name": { + "description": "Link name. On ports that are not broken out, this is always phy0. On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "routes": { + "description": "The set of routes assigned to a switch port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Route" + } + } + }, + "required": [ + "link_name", + "routes" + ] + }, + "RouteDestination": { + "description": "A `RouteDestination` is used to match traffic with a routing rule based on the destination of that traffic.\n\nWhen traffic is to be sent to a destination that is within a given `RouteDestination`, the corresponding `RouterRoute` applies, and traffic will be forward to the `RouteTarget` for that rule.", + "oneOf": [ + { + "description": "Route applies to traffic destined for the specified IP address", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Route applies to traffic destined for the specified IP subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_net" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] + }, + { + 
"description": "Route applies to traffic destined for the specified VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Route applies to traffic destined for the specified VPC subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "RouteTarget": { + "description": "A `RouteTarget` describes the possible locations that traffic matching a route destination can be sent.", + "oneOf": [ + { + "description": "Forward traffic to a particular IP address.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Forward traffic to a VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Forward traffic to a VPC Subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Forward traffic to a specific instance", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Forward traffic to an internet gateway", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internet_gateway" + ] + }, + "value": { + "$ref": 
"#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Drop matching traffic", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "drop" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "RouterRoute": { + "description": "A route defines a rule that governs where traffic should be sent based on its destination.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "destination": { + "description": "Selects which traffic this routing rule will apply to", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" + } + ] + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "kind": { + "description": "Describes the kind of router. Set at creation. `read-only`", + "allOf": [ + { + "$ref": "#/components/schemas/RouterRouteKind" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "target": { + "description": "The location that matched packets should be forwarded to", + "allOf": [ + { + "$ref": "#/components/schemas/RouteTarget" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_router_id": { + "description": "The ID of the VPC Router to which the route belongs", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "description", + "destination", + "id", + "kind", + "name", + "target", + "time_created", + "time_modified", + "vpc_router_id" + ] + }, + "RouterRouteCreate": { + "description": "Create-time parameters for a 
`RouterRoute`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "destination": { + "description": "Selects which traffic this routing rule will apply to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "target": { + "description": "The location that matched packets should be forwarded to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteTarget" + } + ] + } + }, + "required": [ + "description", + "destination", + "name", + "target" + ] + }, + "RouterRouteKind": { + "description": "The kind of a `RouterRoute`\n\nThe kind determines certain attributes such as if the route is modifiable and describes how or where the route was created.", + "oneOf": [ + { + "description": "Determines the default destination of traffic, such as whether it goes to the internet or not.\n\n`Destination: An Internet Gateway` `Modifiable: true`", + "type": "string", + "enum": [ + "default" + ] + }, + { + "description": "Automatically added for each VPC Subnet in the VPC\n\n`Destination: A VPC Subnet` `Modifiable: false`", + "type": "string", + "enum": [ + "vpc_subnet" + ] + }, + { + "description": "Automatically added when VPC peering is established\n\n`Destination: A different VPC` `Modifiable: false`", + "type": "string", + "enum": [ + "vpc_peering" + ] + }, + { + "description": "Created by a user; see `RouteTarget`\n\n`Destination: User defined` `Modifiable: true`", + "type": "string", + "enum": [ + "custom" + ] + } + ] + }, + "RouterRouteResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouterRoute" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + 
}, + "RouterRouteUpdate": { + "description": "Updateable properties of a `RouterRoute`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "destination": { + "description": "Selects which traffic this routing rule will apply to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" + } + ] + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "target": { + "description": "The location that matched packets should be forwarded to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteTarget" + } + ] + } + }, + "required": [ + "destination", + "target" + ] + }, + "SamlIdentityProvider": { + "description": "Identity-related metadata that's included in nearly all public API objects", + "type": "object", + "properties": { + "acs_url": { + "description": "Service provider endpoint where the response will be sent", + "type": "string" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "group_attribute_name": { + "nullable": true, + "description": "If set, attributes with this name will be considered to denote a user's group membership, where the values will be the group names.", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "idp_entity_id": { + "description": "IdP's entity id", + "type": "string" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "public_cert": { + "nullable": true, + "description": "Optional request signing public certificate (base64 encoded der file)", + "type": "string" + }, + "slo_url": { + "description": "Service provider endpoint where the idp should send log out requests", + "type": "string" + }, + "sp_client_id": { + 
"description": "SP's client id", + "type": "string" + }, + "technical_contact_email": { + "description": "Customer's technical contact for saml configuration", + "type": "string" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "acs_url", + "description", + "id", + "idp_entity_id", + "name", + "slo_url", + "sp_client_id", + "technical_contact_email", + "time_created", + "time_modified" + ] + }, + "SamlIdentityProviderCreate": { + "description": "Create-time identity-related parameters", + "type": "object", + "properties": { + "acs_url": { + "description": "service provider endpoint where the response will be sent", + "type": "string" + }, + "description": { + "type": "string" + }, + "group_attribute_name": { + "nullable": true, + "description": "If set, SAML attributes with this name will be considered to denote a user's group membership, where the attribute value(s) should be a comma-separated list of group names.", + "type": "string" + }, + "idp_entity_id": { + "description": "idp's entity id", + "type": "string" + }, + "idp_metadata_source": { + "description": "the source of an identity provider metadata descriptor", + "allOf": [ + { + "$ref": "#/components/schemas/IdpMetadataSource" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "signing_keypair": { + "nullable": true, + "description": "request signing key pair", + "default": null, + "allOf": [ + { + "$ref": "#/components/schemas/DerEncodedKeyPair" + } + ] + }, + "slo_url": { + "description": "service provider endpoint where the idp should send log out requests", + "type": "string" + }, + "sp_client_id": { + "description": "sp's client id", + "type": "string" + }, + "technical_contact_email": { + "description": "customer's technical contact for 
saml configuration", + "type": "string" + } + }, + "required": [ + "acs_url", + "description", + "idp_entity_id", + "idp_metadata_source", + "name", + "slo_url", + "sp_client_id", + "technical_contact_email" + ] + }, + "ScimClientBearerToken": { + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "time_expires": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created" + ] + }, + "ScimClientBearerTokenValue": { + "description": "The POST response is the only time the generated bearer token is returned to the client.", + "type": "object", + "properties": { + "bearer_token": { + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "time_expires": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bearer_token", + "id", + "time_created" + ] + }, + "ServiceIcmpConfig": { + "description": "Configuration of inbound ICMP allowed by API services.", + "type": "object", + "properties": { + "enabled": { + "description": "When enabled, Nexus is able to receive ICMP Destination Unreachable type 3 (port unreachable) and type 4 (fragmentation needed), Redirect, and Time Exceeded messages. These enable Nexus to perform Path MTU discovery and better cope with fragmentation issues. 
Otherwise all inbound ICMP traffic will be dropped.", + "type": "boolean" + } + }, + "required": [ + "enabled" + ] + }, + "ServiceUsingCertificate": { + "description": "The service intended to use this certificate.", + "oneOf": [ + { + "description": "This certificate is intended for access to the external API.", + "type": "string", + "enum": [ + "external_api" + ] + } + ] + }, + "SetTargetReleaseParams": { + "description": "Parameters for PUT requests to `/v1/system/update/target-release`.", + "type": "object", + "properties": { + "system_version": { + "description": "Version of the system software to make the target release.", + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + } + }, + "required": [ + "system_version" + ] + }, + "Silo": { + "description": "View of a Silo\n\nA Silo is the highest level unit of isolation.", + "type": "object", + "properties": { + "admin_group_name": { + "nullable": true, + "description": "Optionally, silos can have a group name that is automatically granted the silo admin role.", + "type": "string" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "discoverable": { + "description": "A silo where discoverable is false can be retrieved only by its id - it will not be part of the \"list all silos\" output.", + "type": "boolean" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "identity_mode": { + "description": "How users and groups are managed in this Silo", + "allOf": [ + { + "$ref": "#/components/schemas/SiloIdentityMode" + } + ] + }, + "mapped_fleet_roles": { + "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any 
Silo roles unless there's a corresponding entry in this map.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRole" + }, + "uniqueItems": true + } + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "discoverable", + "id", + "identity_mode", + "mapped_fleet_roles", + "name", + "time_created", + "time_modified" + ] + }, + "SiloAuthSettings": { + "description": "View of silo authentication settings", + "type": "object", + "properties": { + "device_token_max_ttl_seconds": { + "nullable": true, + "description": "Maximum lifetime of a device token in seconds. If set to null, users will be able to create tokens that do not expire.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "silo_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "silo_id" + ] + }, + "SiloAuthSettingsUpdate": { + "description": "Updateable properties of a silo's settings.", + "type": "object", + "properties": { + "device_token_max_ttl_seconds": { + "nullable": true, + "description": "Maximum lifetime of a device token in seconds. 
If set to null, users will be able to create tokens that do not expire.", + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + "required": [ + "device_token_max_ttl_seconds" + ] + }, + "SiloCreate": { + "description": "Create-time parameters for a `Silo`", + "type": "object", + "properties": { + "admin_group_name": { + "nullable": true, + "description": "If set, this group will be created during Silo creation and granted the \"Silo Admin\" role. Identity providers can assert that users belong to this group and those users can log in and further initialize the Silo.\n\nNote that if configuring a SAML based identity provider, group_attribute_name must be set for users to be considered part of a group. See `SamlIdentityProviderCreate` for more information.", + "type": "string" + }, + "description": { + "type": "string" + }, + "discoverable": { + "type": "boolean" + }, + "identity_mode": { + "$ref": "#/components/schemas/SiloIdentityMode" + }, + "mapped_fleet_roles": { + "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any Silo roles unless there's a corresponding entry in this map.", + "default": {}, + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRole" + }, + "uniqueItems": true + } + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "quotas": { + "description": "Limits the amount of provisionable CPU, memory, and storage in the Silo. CPU and memory are only consumed by running instances, while storage is consumed by any disk or snapshot. A value of 0 means that resource is *not* provisionable.", + "allOf": [ + { + "$ref": "#/components/schemas/SiloQuotasCreate" + } + ] + }, + "tls_certificates": { + "description": "Initial TLS certificates to be used for the new Silo's console and API endpoints. 
These should be valid for the Silo's DNS name(s).", + "type": "array", + "items": { + "$ref": "#/components/schemas/CertificateCreate" + } + } + }, + "required": [ + "description", + "discoverable", + "identity_mode", + "name", + "quotas", + "tls_certificates" + ] + }, + "SiloIdentityMode": { + "description": "Describes how identities are managed and users are authenticated in this Silo", + "oneOf": [ + { + "description": "Users are authenticated with SAML using an external authentication provider. The system updates information about users and groups only during successful authentication (i.e,. \"JIT provisioning\" of users and groups).", + "type": "string", + "enum": [ + "saml_jit" + ] + }, + { + "description": "The system is the source of truth about users. There is no linkage to an external authentication provider or identity provider.", + "type": "string", + "enum": [ + "local_only" + ] + }, + { + "description": "Users are authenticated with SAML using an external authentication provider. Users and groups are managed with SCIM API calls, likely from the same authentication provider.", + "type": "string", + "enum": [ + "saml_scim" + ] + } + ] + }, + "SiloIpPool": { + "description": "An IP pool in the context of a silo", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. 
There can be at most one default for a given silo.", + "type": "boolean" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "is_default", + "name", + "time_created", + "time_modified" + ] + }, + "SiloIpPoolResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloIpPool" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SiloQuotas": { + "description": "A collection of resource counts used to set the virtual capacity of a silo", + "type": "object", + "properties": { + "cpus": { + "description": "Number of virtual CPUs", + "type": "integer", + "format": "int64" + }, + "memory": { + "description": "Amount of memory in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "silo_id": { + "type": "string", + "format": "uuid" + }, + "storage": { + "description": "Amount of disk storage in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "cpus", + "memory", + "silo_id", + "storage" + ] + }, + "SiloQuotasCreate": { + "description": "The amount of provisionable resources for a Silo", + "type": "object", + "properties": { + "cpus": { + "description": "The amount of virtual CPUs available for running instances in the Silo", + "type": "integer", + "format": "int64" 
+ }, + "memory": { + "description": "The amount of RAM (in bytes) available for running instances in the Silo", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "storage": { + "description": "The amount of storage (in bytes) available for disks or snapshots", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "cpus", + "memory", + "storage" + ] + }, + "SiloQuotasResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloQuotas" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SiloQuotasUpdate": { + "description": "Updateable properties of a Silo's resource limits. If a value is omitted it will not be updated.", + "type": "object", + "properties": { + "cpus": { + "nullable": true, + "description": "The amount of virtual CPUs available for running instances in the Silo", + "type": "integer", + "format": "int64" + }, + "memory": { + "nullable": true, + "description": "The amount of RAM (in bytes) available for running instances in the Silo", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "storage": { + "nullable": true, + "description": "The amount of storage (in bytes) available for disks or snapshots", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + } + }, + "SiloResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Silo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" 
+ } + }, + "required": [ + "items" + ] + }, + "SiloRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "limited_collaborator", + "viewer" + ] + }, + "SiloRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", + "type": "object", + "properties": { + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "SiloRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { + "type": "string", + "format": "uuid" + }, + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/SiloRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "SiloUtilization": { + "description": "View of a silo's resource utilization and capacity", + "type": "object", + "properties": { + "allocated": { + "description": "Accounts for the total amount of resources reserved for silos via their quotas", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] + }, + "provisioned": { + "description": "Accounts for resources allocated by in silos like CPU or memory for running instances and storage for disks and snapshots Note that CPU and memory resources associated with a stopped instances are not counted here", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] + }, + "silo_id": 
{ + "type": "string", + "format": "uuid" + }, + "silo_name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "allocated", + "provisioned", + "silo_id", + "silo_name" + ] + }, + "SiloUtilizationResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloUtilization" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Sled": { + "description": "An operator's view of a Sled.", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "policy": { + "description": "The operator-defined policy of a sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledPolicy" + } + ] + }, + "rack_id": { + "description": "The rack to which this Sled is currently attached", + "type": "string", + "format": "uuid" + }, + "state": { + "description": "The current state of the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledState" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "usable_hardware_threads": { + "description": "The number of hardware threads which can execute on this sled", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "usable_physical_ram": { + "description": "Amount of RAM which may be used by the Sled's OS", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + 
"baseboard", + "id", + "policy", + "rack_id", + "state", + "time_created", + "time_modified", + "usable_hardware_threads", + "usable_physical_ram" + ] + }, + "SledId": { + "description": "The unique ID of a sled.", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "id" + ] + }, + "SledInstance": { + "description": "An operator's view of an instance running on a given sled", + "type": "object", + "properties": { + "active_sled_id": { + "type": "string", + "format": "uuid" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "memory": { + "type": "integer", + "format": "int64" + }, + "migration_id": { + "nullable": true, + "type": "string", + "format": "uuid" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "ncpus": { + "type": "integer", + "format": "int64" + }, + "project_name": { + "$ref": "#/components/schemas/Name" + }, + "silo_name": { + "$ref": "#/components/schemas/Name" + }, + "state": { + "$ref": "#/components/schemas/InstanceState" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "active_sled_id", + "id", + "memory", + "name", + "ncpus", + "project_name", + "silo_name", + "state", + "time_created", + "time_modified" + ] + }, + "SledInstanceResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledInstance" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + 
"items" + ] + }, + "SledPolicy": { + "description": "The operator-defined policy of a sled.", + "oneOf": [ + { + "description": "The operator has indicated that the sled is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + }, + "provision_policy": { + "description": "Determines whether new resources can be provisioned onto the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "kind", + "provision_policy" + ] + }, + { + "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "SledProvisionPolicy": { + "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "oneOf": [ + { + "description": "New resources will be provisioned on this sled.", + "type": "string", + "enum": [ + "provisionable" + ] + }, + { + "description": "New resources will not be provisioned on this sled. 
However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", + "type": "string", + "enum": [ + "non_provisionable" + ] + } + ] + }, + "SledProvisionPolicyParams": { + "description": "Parameters for `sled_set_provision_policy`.", + "type": "object", + "properties": { + "state": { + "description": "The provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "state" + ] + }, + "SledProvisionPolicyResponse": { + "description": "Response to `sled_set_provision_policy`.", + "type": "object", + "properties": { + "new_state": { + "description": "The new provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + }, + "old_state": { + "description": "The old provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "new_state", + "old_state" + ] + }, + "SledResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Sled" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SledState": { + "description": "The current state of the sled.", + "oneOf": [ + { + "description": "The sled is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, + "Snapshot": { + "description": "View of a Snapshot", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "disk_id": { + "type": "string", + "format": "uuid" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "type": "string", + "format": "uuid" + }, + "size": { + "$ref": "#/components/schemas/ByteCount" + }, + "state": { + "$ref": "#/components/schemas/SnapshotState" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "disk_id", + "id", + "name", + "project_id", + "size", + "state", + "time_created", + "time_modified" + ] + }, + "SnapshotCreate": { + "description": "Create-time parameters for a `Snapshot`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "disk": { + "description": "The disk to be snapshotted", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "disk", + "name" + ] + }, + "SnapshotResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Snapshot" + } + }, + 
"next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SnapshotState": { + "type": "string", + "enum": [ + "creating", + "ready", + "faulted", + "destroyed" + ] + }, + "SshKey": { + "description": "View of an SSH Key", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "public_key": { + "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", + "type": "string" + }, + "silo_user_id": { + "description": "The user to whom this key belongs", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "public_key", + "silo_user_id", + "time_created", + "time_modified" + ] + }, + "SshKeyCreate": { + "description": "Create-time parameters for an `SshKey`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "public_key": { + "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", + "type": "string" + } + }, + "required": [ + "description", + "name", + "public_key" + ] + }, + "SshKeyResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of 
results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SshKey" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SupportBundleCreate": { + "type": "object", + "properties": { + "user_comment": { + "nullable": true, + "description": "User comment for the support bundle", + "type": "string" + } + } + }, + "SupportBundleInfo": { + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "reason_for_creation": { + "type": "string" + }, + "reason_for_failure": { + "nullable": true, + "type": "string" + }, + "state": { + "$ref": "#/components/schemas/SupportBundleState" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "user_comment": { + "nullable": true, + "type": "string" + } + }, + "required": [ + "id", + "reason_for_creation", + "state", + "time_created" + ] + }, + "SupportBundleInfoResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SupportBundleState": { + "oneOf": [ + { + "description": "Support Bundle still actively being collected.\n\nThis is the initial state for a Support Bundle, and it will automatically transition to either \"Failing\" or \"Active\".\n\nIf a user no longer wants to access a Support Bundle, they can request cancellation, which will transition to the \"Destroying\" state.", + "type": "string", + "enum": [ + "collecting" + ] + }, + { + "description": "Support Bundle is being destroyed.\n\nOnce backing storage has been freed, this bundle is destroyed.", 
+ "type": "string", + "enum": [ + "destroying" + ] + }, + { + "description": "Support Bundle was not created successfully, or was created and has lost backing storage.\n\nThe record of the bundle still exists for readability, but the only valid operation on these bundles is to destroy them.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "Support Bundle has been processed, and is ready for usage.", + "type": "string", + "enum": [ + "active" + ] + } + ] + }, + "SupportBundleUpdate": { + "type": "object", + "properties": { + "user_comment": { + "nullable": true, + "description": "User comment for the support bundle", + "type": "string" + } + } + }, + "Switch": { + "description": "An operator's view of a Switch.", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "rack_id": { + "description": "The rack to which this Switch is currently attached", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "baseboard", + "id", + "rack_id", + "time_created", + "time_modified" + ] + }, + "SwitchBgpHistory": { + "description": "BGP message history for a particular switch.", + "type": "object", + "properties": { + "history": { + "description": "Message history indexed by peer address.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BgpMessageHistory" + } + }, + "switch": { + "description": "Switch this message history is associated with.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + } + }, + "required": [ + "history", + 
"switch" + ] + }, + "SwitchInterfaceConfig": { + "description": "A switch port interface configuration for a port settings object.", + "type": "object", + "properties": { + "id": { + "description": "A unique identifier for this switch interface.", + "type": "string", + "format": "uuid" + }, + "interface_name": { + "description": "The name of this switch interface.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "kind": { + "description": "The switch interface kind.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchInterfaceKind2" + } + ] + }, + "port_settings_id": { + "description": "The port settings object this switch interface configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "v6_enabled": { + "description": "Whether or not IPv6 is enabled on this interface.", + "type": "boolean" + } + }, + "required": [ + "id", + "interface_name", + "kind", + "port_settings_id", + "v6_enabled" + ] + }, + "SwitchInterfaceConfigCreate": { + "description": "A layer-3 switch interface configuration. When IPv6 is enabled, a link local address will be created for the interface.", + "type": "object", + "properties": { + "kind": { + "description": "What kind of switch interface this configuration represents.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchInterfaceKind" + } + ] + }, + "link_name": { + "description": "Link name. On ports that are not broken out, this is always phy0. On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "v6_enabled": { + "description": "Whether or not IPv6 is enabled.", + "type": "boolean" + } + }, + "required": [ + "kind", + "link_name", + "v6_enabled" + ] + }, + "SwitchInterfaceKind": { + "description": "Indicates the kind for a switch interface.", + "oneOf": [ + { + "description": "Primary interfaces are associated with physical links. 
There is exactly one primary interface per physical link.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "primary" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vlan" + ] + }, + "vid": { + "description": "The virtual network id (VID) that distinguishes this interface and is used for producing and consuming 802.1Q Ethernet tags. This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "type", + "vid" + ] + }, + { + "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "loopback" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "SwitchInterfaceKind2": { + "description": "Describes the kind of a switch interface.", + "oneOf": [ + { + "description": "Primary interfaces are associated with physical links. 
There is exactly one primary interface per physical link.", + "type": "string", + "enum": [ + "primary" + ] + }, + { + "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", + "type": "string", + "enum": [ + "vlan" + ] + }, + { + "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", + "type": "string", + "enum": [ + "loopback" + ] + } + ] + }, + "SwitchLinkState": {}, + "SwitchLocation": { + "description": "Identifies switch physical location", + "oneOf": [ + { + "description": "Switch in upper slot", + "type": "string", + "enum": [ + "switch0" + ] + }, + { + "description": "Switch in lower slot", + "type": "string", + "enum": [ + "switch1" + ] + } + ] + }, + "SwitchPort": { + "description": "A switch port represents a physical external port on a rack switch.", + "type": "object", + "properties": { + "id": { + "description": "The id of the switch port.", + "type": "string", + "format": "uuid" + }, + "port_name": { + "description": "The name of this switch port.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "port_settings_id": { + "nullable": true, + "description": "The primary settings group of this switch port. 
Will be `None` until this switch port is configured.", + "type": "string", + "format": "uuid" + }, + "rack_id": { + "description": "The rack this switch port belongs to.", + "type": "string", + "format": "uuid" + }, + "switch_location": { + "description": "The switch location of this switch port.", + "type": "string" + } + }, + "required": [ + "id", + "port_name", + "rack_id", + "switch_location" + ] + }, + "SwitchPortAddressView": { + "description": "An IP address configuration for a port settings object.", + "type": "object", + "properties": { + "address": { + "description": "The IP address and prefix.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "address_lot_block_id": { + "description": "The id of the address lot block this address is drawn from.", + "type": "string", + "format": "uuid" + }, + "address_lot_id": { + "description": "The id of the address lot this address is drawn from.", + "type": "string", + "format": "uuid" + }, + "address_lot_name": { + "description": "The name of the address lot this address is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "interface_name": { + "description": "The interface name this address belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "port_settings_id": { + "description": "The port settings object this address configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "vlan_id": { + "nullable": true, + "description": "An optional VLAN ID", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "address", + "address_lot_block_id", + "address_lot_id", + "address_lot_name", + "interface_name", + "port_settings_id" + ] + }, + "SwitchPortApplySettings": { + "description": "Parameters for applying settings to switch ports.", + "type": "object", + "properties": { + "port_settings": { + "description": "A name or id to use when applying switch port settings.", + "allOf": [ + { + 
"$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "port_settings" + ] + }, + "SwitchPortConfig": { + "description": "A physical port configuration for a port settings object.", + "type": "object", + "properties": { + "geometry": { + "description": "The physical link geometry of the port.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchPortGeometry2" + } + ] + }, + "port_settings_id": { + "description": "The id of the port settings object this configuration belongs to.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "geometry", + "port_settings_id" + ] + }, + "SwitchPortConfigCreate": { + "description": "Physical switch port configuration.", + "type": "object", + "properties": { + "geometry": { + "description": "Link geometry for the switch port.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchPortGeometry" + } + ] + } + }, + "required": [ + "geometry" + ] + }, + "SwitchPortGeometry": { + "description": "The link geometry associated with a switch port.", + "oneOf": [ + { + "description": "The port contains a single QSFP28 link with four lanes.", + "type": "string", + "enum": [ + "qsfp28x1" + ] + }, + { + "description": "The port contains two QSFP28 links each with two lanes.", + "type": "string", + "enum": [ + "qsfp28x2" + ] + }, + { + "description": "The port contains four SFP28 links each with one lane.", + "type": "string", + "enum": [ + "sfp28x4" + ] + } + ] + }, + "SwitchPortGeometry2": { + "description": "The link geometry associated with a switch port.", + "oneOf": [ + { + "description": "The port contains a single QSFP28 link with four lanes.", + "type": "string", + "enum": [ + "qsfp28x1" + ] + }, + { + "description": "The port contains two QSFP28 links each with two lanes.", + "type": "string", + "enum": [ + "qsfp28x2" + ] + }, + { + "description": "The port contains four SFP28 links each with one lane.", + "type": "string", + "enum": [ + "sfp28x4" + ] + } + ] + }, + "SwitchPortLinkConfig": { + 
"description": "A link configuration for a port settings object.", + "type": "object", + "properties": { + "autoneg": { + "description": "Whether or not the link has autonegotiation enabled.", + "type": "boolean" + }, + "fec": { + "nullable": true, + "description": "The requested forward-error correction method. If this is not specified, the standard FEC for the underlying media will be applied if it can be determined.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkFec" + } + ] + }, + "link_name": { + "description": "The name of this link.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "lldp_link_config": { + "nullable": true, + "description": "The link-layer discovery protocol service configuration for this link.", + "allOf": [ + { + "$ref": "#/components/schemas/LldpLinkConfig" + } + ] + }, + "mtu": { + "description": "The maximum transmission unit for this link.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "port_settings_id": { + "description": "The port settings this link configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "speed": { + "description": "The configured speed of the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkSpeed" + } + ] + }, + "tx_eq_config": { + "nullable": true, + "description": "The tx_eq configuration for this link.", + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig2" + } + ] + } + }, + "required": [ + "autoneg", + "link_name", + "mtu", + "port_settings_id", + "speed" + ] + }, + "SwitchPortResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPort" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SwitchPortRouteConfig": 
{ + "description": "A route configuration for a port settings object.", + "type": "object", + "properties": { + "dst": { + "description": "The route's destination network.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "gw": { + "description": "The route's gateway address.", + "type": "string", + "format": "ip" + }, + "interface_name": { + "description": "The interface name this route configuration is assigned to.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "port_settings_id": { + "description": "The port settings object this route configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "rib_priority": { + "nullable": true, + "description": "Route RIB priority. Higher priority indicates precedence within and across protocols.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN identifier for the route. Use this if the gateway is reachable over an 802.1Q tagged L2 segment.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "dst", + "gw", + "interface_name", + "port_settings_id" + ] + }, + "SwitchPortSettings": { + "description": "This structure contains all port settings information in one place. 
It's a convenience data structure for getting a complete view of a particular port's settings.", + "type": "object", + "properties": { + "addresses": { + "description": "Layer 3 IP address settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortAddressView" + } + }, + "bgp_peers": { + "description": "BGP peer settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeer" + } + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "groups": { + "description": "Switch port settings included from other switch port settings groups.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortSettingsGroups" + } + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "interfaces": { + "description": "Layer 3 interface settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchInterfaceConfig" + } + }, + "links": { + "description": "Layer 2 link settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortLinkConfig" + } + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "port": { + "description": "Layer 1 physical port settings.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchPortConfig" + } + ] + }, + "routes": { + "description": "IP route settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortRouteConfig" + } + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vlan_interfaces": { + "description": "Vlan interface settings.", + 
"type": "array", + "items": { + "$ref": "#/components/schemas/SwitchVlanInterfaceConfig" + } + } + }, + "required": [ + "addresses", + "bgp_peers", + "description", + "groups", + "id", + "interfaces", + "links", + "name", + "port", + "routes", + "time_created", + "time_modified", + "vlan_interfaces" + ] + }, + "SwitchPortSettingsCreate": { + "description": "Parameters for creating switch port settings. Switch port settings are the central data structure for setting up external networking. Switch port settings include link, interface, route, address and dynamic network protocol configuration.", + "type": "object", + "properties": { + "addresses": { + "description": "Address configurations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressConfig" + } + }, + "bgp_peers": { + "description": "BGP peer configurations.", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "description": { + "type": "string" + }, + "groups": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/NameOrId" + } + }, + "interfaces": { + "description": "Interface configurations.", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchInterfaceConfigCreate" + } + }, + "links": { + "description": "Link configurations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/LinkConfigCreate" + } + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "port_config": { + "$ref": "#/components/schemas/SwitchPortConfigCreate" + }, + "routes": { + "description": "Route configurations.", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + } + }, + "required": [ + "addresses", + "description", + "links", + "name", + "port_config" + ] + }, + "SwitchPortSettingsGroups": { + "description": "This structure maps a port settings object to a port settings groups. 
Port settings objects may inherit settings from groups. This mapping defines the relationship between settings objects and the groups they reference.", + "type": "object", + "properties": { + "port_settings_group_id": { + "description": "The id of a port settings group being referenced by a port settings object.", + "type": "string", + "format": "uuid" + }, + "port_settings_id": { + "description": "The id of a port settings object referencing a port settings group.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "port_settings_group_id", + "port_settings_id" + ] + }, + "SwitchPortSettingsIdentity": { + "description": "A switch port settings identity whose id may be used to view additional details.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "SwitchPortSettingsIdentityResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortSettingsIdentity" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": 
[ + "items" + ] + }, + "SwitchResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Switch" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SwitchVlanInterfaceConfig": { + "description": "A switch port VLAN interface configuration for a port settings object.", + "type": "object", + "properties": { + "interface_config_id": { + "description": "The switch interface configuration this VLAN interface configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "vlan_id": { + "description": "The virtual network id for this interface that is used for producing and consuming 802.1Q Ethernet tags. This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "interface_config_id", + "vlan_id" + ] + }, + "TargetRelease": { + "description": "View of a system software target release", + "type": "object", + "properties": { + "time_requested": { + "description": "Time this was set as the target release", + "type": "string", + "format": "date-time" + }, + "version": { + "description": "The specified release of the rack's system software", + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + } + }, + "required": [ + "time_requested", + "version" + ] + }, + "Timeseries": { + "description": "A timeseries contains a timestamped set of values from one source.\n\nThis includes the typed key-value pairs that uniquely identify it, and the set of timestamps and data values from it.", + "type": 
"object", + "properties": { + "fields": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/FieldValue" + } + }, + "points": { + "$ref": "#/components/schemas/Points" + } + }, + "required": [ + "fields", + "points" + ] + }, + "TimeseriesDescription": { + "description": "Text descriptions for the target and metric of a timeseries.", + "type": "object", + "properties": { + "metric": { + "type": "string" + }, + "target": { + "type": "string" + } + }, + "required": [ + "metric", + "target" + ] + }, + "TimeseriesName": { + "title": "The name of a timeseries", + "description": "Names are constructed by concatenating the target and metric names with ':'. Target and metric names must be lowercase alphanumeric characters with '_' separating words.", + "type": "string", + "pattern": "^(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)$" + }, + "TimeseriesQuery": { + "description": "A timeseries query string, written in the Oximeter query language.", + "type": "object", + "properties": { + "query": { + "description": "A timeseries query string, written in the Oximeter query language.", + "type": "string" + } + }, + "required": [ + "query" + ] + }, + "TimeseriesSchema": { + "description": "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field.", + "type": "object", + "properties": { + "authz_scope": { + "$ref": "#/components/schemas/AuthzScope" + }, + "created": { + "type": "string", + "format": "date-time" + }, + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "description": { + "$ref": "#/components/schemas/TimeseriesDescription" + }, + "field_schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FieldSchema" + }, + "uniqueItems": true + }, + "timeseries_name": { + "$ref": "#/components/schemas/TimeseriesName" + }, + "units": { + "$ref": "#/components/schemas/Units" + }, + "version": { + "type": 
"integer", + "format": "uint8", + "minimum": 1 + } + }, + "required": [ + "authz_scope", + "created", + "datum_type", + "description", + "field_schema", + "timeseries_name", + "units", + "version" + ] + }, + "TimeseriesSchemaResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/TimeseriesSchema" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "TufRepo": { + "description": "Metadata about a TUF repository", + "type": "object", + "properties": { + "file_name": { + "description": "The file name of the repository, as reported by the client that uploaded it\n\nThis is intended for debugging. The file name may not match any particular pattern, and even if it does, it may not be accurate since it's just what the client reported.", + "type": "string" + }, + "hash": { + "description": "The hash of the repository", + "type": "string", + "format": "hex string (32 bytes)" + }, + "system_version": { + "description": "The system version for this repository\n\nThe system version is a top-level version number applied to all the software in the repository.", + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, + "time_created": { + "description": "Time the repository was uploaded", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "file_name", + "hash", + "system_version", + "time_created" + ] + }, + "TufRepoResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", 
+ "type": "array", + "items": { + "$ref": "#/components/schemas/TufRepo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "TufRepoUpload": { + "type": "object", + "properties": { + "repo": { + "$ref": "#/components/schemas/TufRepo" + }, + "status": { + "$ref": "#/components/schemas/TufRepoUploadStatus" + } + }, + "required": [ + "repo", + "status" + ] + }, + "TufRepoUploadStatus": { + "description": "Whether the uploaded TUF repo already existed or was new and had to be inserted. Part of `TufRepoUpload`.", + "oneOf": [ + { + "description": "The repository already existed in the database", + "type": "string", + "enum": [ + "already_exists" + ] + }, + { + "description": "The repository did not exist, and was inserted into the database", + "type": "string", + "enum": [ + "inserted" + ] + } + ] + }, + "TxEqConfig": { + "description": "Per-port tx-eq overrides. This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", + "type": "object", + "properties": { + "main": { + "nullable": true, + "description": "Main tap", + "type": "integer", + "format": "int32" + }, + "post1": { + "nullable": true, + "description": "Post-cursor tap1", + "type": "integer", + "format": "int32" + }, + "post2": { + "nullable": true, + "description": "Post-cursor tap2", + "type": "integer", + "format": "int32" + }, + "pre1": { + "nullable": true, + "description": "Pre-cursor tap1", + "type": "integer", + "format": "int32" + }, + "pre2": { + "nullable": true, + "description": "Pre-cursor tap2", + "type": "integer", + "format": "int32" + } + } + }, + "TxEqConfig2": { + "description": "Per-port tx-eq overrides. 
This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", + "type": "object", + "properties": { + "main": { + "nullable": true, + "description": "Main tap", + "type": "integer", + "format": "int32" + }, + "post1": { + "nullable": true, + "description": "Post-cursor tap1", + "type": "integer", + "format": "int32" + }, + "post2": { + "nullable": true, + "description": "Post-cursor tap2", + "type": "integer", + "format": "int32" + }, + "pre1": { + "nullable": true, + "description": "Pre-cursor tap1", + "type": "integer", + "format": "int32" + }, + "pre2": { + "nullable": true, + "description": "Pre-cursor tap2", + "type": "integer", + "format": "int32" + } + } + }, + "UninitializedSled": { + "description": "A sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "rack_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "baseboard", + "cubby", + "rack_id" + ] + }, + "UninitializedSledId": { + "description": "The unique hardware ID for a sled", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "serial" + ] + }, + "UninitializedSledResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/UninitializedSled" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Units": { + "description": "Measurement units for timeseries samples.", + "oneOf": [ + { + "type": "string", + "enum": [ + "count", + "bytes", + "seconds", + "nanoseconds", + 
"volts", + "amps", + "watts", + "degrees_celsius" + ] + }, + { + "description": "No meaningful units, e.g. a dimensionless quantity.", + "type": "string", + "enum": [ + "none" + ] + }, + { + "description": "Rotations per minute.", + "type": "string", + "enum": [ + "rpm" + ] + } + ] + }, + "UpdateStatus": { + "type": "object", + "properties": { + "components_by_release_version": { + "description": "Count of components running each release version\n\nKeys will be either:\n\n* Semver-like release version strings * \"install dataset\", representing the initial rack software before any updates * \"unknown\", which means there is no TUF repo uploaded that matches the software running on the component", + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "suspended": { + "description": "Whether automatic update is suspended due to manual update activity\n\nAfter a manual support procedure that changes the system software, automatic update activity is suspended to avoid undoing the change. To resume automatic update, first upload the TUF repository matching the manually applied update, then set that as the target release.", + "type": "boolean" + }, + "target_release": { + "nullable": true, + "description": "Current target release of the system software\n\nThis may not correspond to the actual system software running at the time of request; it is instead the release that the system should be moving towards as a goal state. The system asynchronously updates software to match this target release.\n\nWill only be null if a target release has never been set. 
In that case, the system is not automatically attempting to manage software versions.", + "allOf": [ + { + "$ref": "#/components/schemas/TargetRelease" + } + ] + }, + "time_last_step_planned": { + "description": "Time of most recent update planning activity\n\nThis is intended as a rough indicator of the last time something happened in the update planner.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "components_by_release_version", + "suspended", + "target_release", + "time_last_step_planned" + ] + }, + "UpdatesTrustRoot": { + "description": "Trusted root role used by the update system to verify update repositories.", + "type": "object", + "properties": { + "id": { + "description": "The UUID of this trusted root role.", + "type": "string", + "format": "uuid" + }, + "root_role": { + "description": "The trusted root role itself, a JSON document as described by The Update Framework." + }, + "time_created": { + "description": "Time the trusted root role was added.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "root_role", + "time_created" + ] + }, + "UpdatesTrustRootResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/UpdatesTrustRoot" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "User": { + "description": "View of a User", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the user", + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "display_name", + "id", + 
"silo_id" + ] + }, + "UserBuiltin": { + "description": "View of a Built-in User\n\nBuilt-in users are identities internal to the system, used when the control plane performs actions autonomously", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "UserBuiltinResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/UserBuiltin" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "UserCreate": { + "description": "Create-time parameters for a `User`", + "type": "object", + "properties": { + "external_id": { + "description": "username used to log in", + "allOf": [ + { + "$ref": "#/components/schemas/UserId" + } + ] + }, + "password": { + "description": "how to set the user's login password", + "allOf": [ + { + "$ref": "#/components/schemas/UserPassword" + } + ] + } + }, + "required": [ + "external_id", + "password" + ] + }, + "UserId": { + "title": "A username for a local-only user", + "description": "Usernames 
must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Usernames cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "UserPassword": { + "description": "Parameters for setting a user's password", + "oneOf": [ + { + "description": "Sets the user's password to the provided value", + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "password" + ] + }, + "value": { + "$ref": "#/components/schemas/Password" + } + }, + "required": [ + "mode", + "value" + ] + }, + { + "description": "Invalidates any current password (disabling password authentication)", + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "login_disallowed" + ] + } + }, + "required": [ + "mode" + ] + } + ] + }, + "UserResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/User" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "UsernamePasswordCredentials": { + "description": "Credentials for local user login", + "type": "object", + "properties": { + "password": { + "$ref": "#/components/schemas/Password" + }, + "username": { + "$ref": "#/components/schemas/UserId" + } + }, + "required": [ + "password", + "username" + ] + }, + "Utilization": { + "description": "View of the current silo's resource utilization and capacity", + "type": "object", + "properties": { + "capacity": { + "description": "The total amount of 
resources that can be provisioned in this silo. Actions that would exceed this limit will fail", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] + }, + "provisioned": { + "description": "Accounts for resources allocated to running instances or storage allocated via disks or snapshots. Note that CPU and memory resources associated with a stopped instance are not counted here, whereas associated disks will still be counted", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] + } + }, + "required": [ + "capacity", + "provisioned" + ] + }, + "ValueArray": { + "description": "List of data values for one timeseries.\n\nEach element is an option, where `None` represents a missing sample.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "integer" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "integer", + "format": "int64" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "double" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "number", + "format": "double" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "boolean" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "boolean" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "string" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "string" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "integer_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + 
"allOf": [ + { + "$ref": "#/components/schemas/Distributionint64" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "double_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Distributiondouble" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] + } + ] + }, + "Values": { + "description": "A single list of values, for one dimension of a timeseries.", + "type": "object", + "properties": { + "metric_type": { + "description": "The type of this metric.", + "allOf": [ + { + "$ref": "#/components/schemas/MetricType" + } + ] + }, + "values": { + "description": "The data values.", + "allOf": [ + { + "$ref": "#/components/schemas/ValueArray" + } + ] + } + }, + "required": [ + "metric_type", + "values" + ] + }, + "VirtualResourceCounts": { + "description": "A collection of resource counts used to describe capacity and utilization", + "type": "object", + "properties": { + "cpus": { + "description": "Number of virtual CPUs", + "type": "integer", + "format": "int64" + }, + "memory": { + "description": "Amount of memory in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "storage": { + "description": "Amount of disk storage in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "cpus", + "memory", + "storage" + ] + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "Vpc": { + "description": "View of a VPC", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "dns_name": { + "description": "The name used for the VPC in DNS.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "id": { + "description": 
"unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "ipv6_prefix": { + "description": "The unique local IPv6 address range for subnets in this VPC", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "description": "id for the project containing this VPC", + "type": "string", + "format": "uuid" + }, + "system_router_id": { + "description": "id for the system router where subnet default routes are registered", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "dns_name", + "id", + "ipv6_prefix", + "name", + "project_id", + "system_router_id", + "time_created", + "time_modified" + ] + }, + "VpcCreate": { + "description": "Create-time parameters for a `Vpc`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "dns_name": { + "$ref": "#/components/schemas/Name" + }, + "ipv6_prefix": { + "nullable": true, + "description": "The IPv6 prefix for this VPC\n\nAll IPv6 subnets created from this VPC must be taken from this range, which should be a Unique Local Address in the range `fd00::/48`. 
The default VPC Subnet will have the first `/64` range from this prefix.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "dns_name", + "name" + ] + }, + "VpcFirewallIcmpFilter": { + "type": "object", + "properties": { + "code": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/IcmpParamRange" + } + ] + }, + "icmp_type": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "icmp_type" + ] + }, + "VpcFirewallRule": { + "description": "A single rule in a VPC firewall", + "type": "object", + "properties": { + "action": { + "description": "Whether traffic matching the rule should be allowed or dropped", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleAction" + } + ] + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "direction": { + "description": "Whether this rule is for incoming or outgoing traffic", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleDirection" + } + ] + }, + "filters": { + "description": "Reductions on the scope of the rule", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleFilter" + } + ] + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "priority": { + "description": "The relative priority of this rule", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "status": { + "description": "Whether this rule is in effect", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleStatus" + } + ] + }, + "targets": { + "description": "Determine the set of instances that the rule applies to", + "type": 
"array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleTarget" + } + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which this rule belongs", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "action", + "description", + "direction", + "filters", + "id", + "name", + "priority", + "status", + "targets", + "time_created", + "time_modified", + "vpc_id" + ] + }, + "VpcFirewallRuleAction": { + "type": "string", + "enum": [ + "allow", + "deny" + ] + }, + "VpcFirewallRuleDirection": { + "type": "string", + "enum": [ + "inbound", + "outbound" + ] + }, + "VpcFirewallRuleFilter": { + "description": "Filters reduce the scope of a firewall rule. Without filters, the rule applies to all packets to the targets (or from the targets, if it's an outbound rule). With multiple filters, the rule applies only to packets matching ALL filters. The maximum number of each type of filter is 256.", + "type": "object", + "properties": { + "hosts": { + "nullable": true, + "description": "If present, host filters match the \"other end\" of traffic from the target’s perspective: for an inbound rule, they match the source of traffic. 
For an outbound rule, they match the destination.", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleHostFilter" + }, + "maxItems": 256 + }, + "ports": { + "nullable": true, + "description": "If present, the destination ports or port ranges this rule applies to.", + "type": "array", + "items": { + "$ref": "#/components/schemas/L4PortRange" + }, + "maxItems": 256 + }, + "protocols": { + "nullable": true, + "description": "If present, the networking protocols this rule applies to.", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleProtocol" + }, + "maxItems": 256 + } + } + }, + "VpcFirewallRuleHostFilter": { + "description": "The `VpcFirewallRuleHostFilter` is used to filter traffic on the basis of its source or destination host.", + "oneOf": [ + { + "description": "The rule applies to traffic from/to all instances in the VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to traffic from/to all instances in the VPC Subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to traffic from/to this specific instance", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to traffic from/to a specific IP address", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": 
"The rule applies to traffic from/to a specific IP subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_net" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "VpcFirewallRuleProtocol": { + "description": "The protocols that may be specified in a firewall rule's filter", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "tcp" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "udp" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "icmp" + ] + }, + "value": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallIcmpFilter" + } + ] + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "VpcFirewallRuleStatus": { + "type": "string", + "enum": [ + "disabled", + "enabled" + ] + }, + "VpcFirewallRuleTarget": { + "description": "A `VpcFirewallRuleTarget` is used to specify the set of instances to which a firewall rule applies. You can target instances directly by name, or specify a VPC, VPC subnet, IP, or IP subnet, which will apply the rule to traffic going to all matching instances. 
Targets are additive: the rule applies to instances matching ANY target.", + "oneOf": [ + { + "description": "The rule applies to all instances in the VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to all instances in the VPC Subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to this specific instance", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to a specific IP address", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "The rule applies to a specific IP subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_net" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "VpcFirewallRuleUpdate": { + "description": "A single rule in a VPC firewall", + "type": "object", + "properties": { + "action": { + "description": "Whether traffic matching the rule should be allowed or dropped", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleAction" + } + ] + }, + "description": { + "description": "Human-readable free-form text about a resource", + "type": "string" + }, + "direction": { + "description": "Whether this rule is for incoming or outgoing traffic", + "allOf": [ + { + "$ref": 
"#/components/schemas/VpcFirewallRuleDirection" + } + ] + }, + "filters": { + "description": "Reductions on the scope of the rule", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleFilter" + } + ] + }, + "name": { + "description": "Name of the rule, unique to this VPC", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "priority": { + "description": "The relative priority of this rule", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "status": { + "description": "Whether this rule is in effect", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleStatus" + } + ] + }, + "targets": { + "description": "Determine the set of instances that the rule applies to", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleTarget" + }, + "maxItems": 256 + } + }, + "required": [ + "action", + "description", + "direction", + "filters", + "name", + "priority", + "status", + "targets" + ] + }, + "VpcFirewallRuleUpdateParams": { + "description": "Updated list of firewall rules. 
Will replace all existing rules.", + "type": "object", + "properties": { + "rules": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleUpdate" + }, + "maxItems": 1024 + } + } + }, + "VpcFirewallRules": { + "description": "Collection of a Vpc's firewall rules", + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRule" + } + } + }, + "required": [ + "rules" + ] + }, + "VpcResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Vpc" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "VpcRouter": { + "description": "A VPC router defines a series of rules that indicate where traffic should be sent depending on its destination.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "kind": { + "$ref": "#/components/schemas/VpcRouterKind" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which the router belongs.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "description", + 
"id", + "kind", + "name", + "time_created", + "time_modified", + "vpc_id" + ] + }, + "VpcRouterCreate": { + "description": "Create-time parameters for a `VpcRouter`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "name" + ] + }, + "VpcRouterKind": { + "type": "string", + "enum": [ + "system", + "custom" + ] + }, + "VpcRouterResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcRouter" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "VpcRouterUpdate": { + "description": "Updateable properties of a `VpcRouter`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "VpcSubnet": { + "description": "A VPC subnet represents a logical grouping for instances that allows network traffic between them, within a IPv4 subnetwork or optionally an IPv6 subnetwork.", + "type": "object", + "properties": { + "custom_router_id": { + "nullable": true, + "description": "ID for an attached custom router.", + "type": "string", + "format": "uuid" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "ipv4_block": { + "description": "The IPv4 subnet CIDR block.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] + }, + "ipv6_block": { + "description": "The IPv6 subnet CIDR 
block.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which the subnet belongs.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "description", + "id", + "ipv4_block", + "ipv6_block", + "name", + "time_created", + "time_modified", + "vpc_id" + ] + }, + "VpcSubnetCreate": { + "description": "Create-time parameters for a `VpcSubnet`", + "type": "object", + "properties": { + "custom_router": { + "nullable": true, + "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.\n\nCustom routers apply in addition to the VPC-wide *system* router, and have higher priority than the system router for an otherwise equal-prefix-length match.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "description": { + "type": "string" + }, + "ipv4_block": { + "description": "The IPv4 address range for this subnet.\n\nIt must be allocated from an RFC 1918 private address range, and must not overlap with any other existing subnet in the VPC.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] + }, + "ipv6_block": { + "nullable": true, + "description": "The IPv6 address range for this subnet.\n\nIt must be allocated from the RFC 4193 Unique Local Address range, with the prefix equal to the parent VPC's prefix. A random `/64` block will be assigned if one is not provided. 
It must not overlap with any existing subnet in the VPC.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "ipv4_block", + "name" + ] + }, + "VpcSubnetResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcSubnet" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "VpcSubnetUpdate": { + "description": "Updateable properties of a `VpcSubnet`", + "type": "object", + "properties": { + "custom_router": { + "nullable": true, + "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "VpcUpdate": { + "description": "Updateable properties of a `Vpc`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "dns_name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "WebhookCreate": { + "description": "Create-time identity-related parameters", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "endpoint": { + "description": "The URL that webhook notification requests should be sent to", + "type": "string", + "format": "uri" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "secrets": { + "description": "A 
non-empty list of secret keys used to sign webhook payloads.", + "type": "array", + "items": { + "type": "string" + } + }, + "subscriptions": { + "description": "A list of webhook event class subscriptions.\n\nIf this list is empty or is not included in the request body, the webhook will not be subscribed to any events.", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertSubscription" + } + } + }, + "required": [ + "description", + "endpoint", + "name", + "secrets" + ] + }, + "WebhookDeliveryAttempt": { + "description": "An individual delivery attempt for a webhook event.\n\nThis represents a single HTTP request that was sent to the receiver, and its outcome.", + "type": "object", + "properties": { + "attempt": { + "description": "The attempt number.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "response": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/WebhookDeliveryResponse" + } + ] + }, + "result": { + "description": "The outcome of this delivery attempt: either the event was delivered successfully, or the request failed for one of several reasons.", + "allOf": [ + { + "$ref": "#/components/schemas/WebhookDeliveryAttemptResult" + } + ] + }, + "time_sent": { + "description": "The time at which the webhook delivery was attempted.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "attempt", + "result", + "time_sent" + ] + }, + "WebhookDeliveryAttemptResult": { + "oneOf": [ + { + "description": "The webhook event has been delivered successfully.", + "type": "string", + "enum": [ + "succeeded" + ] + }, + { + "description": "A webhook request was sent to the endpoint, and it returned a HTTP error status code indicating an error.", + "type": "string", + "enum": [ + "failed_http_error" + ] + }, + { + "description": "The webhook request could not be sent to the receiver endpoint.", + "type": "string", + "enum": [ + "failed_unreachable" + ] + }, + { + "description": "A 
connection to the receiver endpoint was successfully established, but no response was received within the delivery timeout.", + "type": "string", + "enum": [ + "failed_timeout" + ] + } + ] + }, + "WebhookDeliveryResponse": { + "description": "The response received from a webhook receiver endpoint.", + "type": "object", + "properties": { + "duration_ms": { + "description": "The response time of the webhook endpoint, in milliseconds.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "status": { + "description": "The HTTP status code returned from the webhook endpoint.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "duration_ms", + "status" + ] + }, + "WebhookReceiver": { + "description": "The configuration for a webhook alert receiver.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "endpoint": { + "description": "The URL that webhook notification requests are sent to.", + "type": "string", + "format": "uri" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "secrets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookSecret" + } + }, + "subscriptions": { + "description": "The list of alert classes to which this receiver is subscribed.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AlertSubscription" + } + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + 
"endpoint", + "id", + "name", + "secrets", + "subscriptions", + "time_created", + "time_modified" + ] + }, + "WebhookReceiverUpdate": { + "description": "Parameters to update a webhook configuration.", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "endpoint": { + "nullable": true, + "description": "The URL that webhook notification requests should be sent to", + "type": "string", + "format": "uri" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "WebhookSecret": { + "description": "A view of a shared secret key assigned to a webhook receiver.\n\nOnce a secret is created, the value of the secret is not available in the API, as it must remain secret. Instead, secrets are referenced by their unique IDs assigned when they are created.", + "type": "object", + "properties": { + "id": { + "description": "The public unique ID of the secret.", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "The UTC timestamp at which this secret was created.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created" + ] + }, + "WebhookSecretCreate": { + "type": "object", + "properties": { + "secret": { + "description": "The value of the shared secret key.", + "type": "string" + } + }, + "required": [ + "secret" + ] + }, + "WebhookSecrets": { + "description": "A list of the IDs of secrets associated with a webhook receiver.", + "type": "object", + "properties": { + "secrets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookSecret" + } + } + }, + "required": [ + "secrets" + ] + }, + "NameOrIdSortMode": { + "description": "Supported set of sort modes for scanning by name or id", + "oneOf": [ + { + "description": "sort in increasing order of \"name\"", + "type": "string", + "enum": [ + "name_ascending" + ] + }, + { + "description": "sort in decreasing order of \"name\"", + 
"type": "string", + "enum": [ + "name_descending" + ] + }, + { + "description": "sort in increasing order of \"id\"", + "type": "string", + "enum": [ + "id_ascending" + ] + } + ] + }, + "TimeAndIdSortMode": { + "description": "Supported set of sort modes for scanning by timestamp and ID", + "oneOf": [ + { + "description": "sort in increasing order of timestamp and ID, i.e., earliest first", + "type": "string", + "enum": [ + "time_and_id_ascending" + ] + }, + { + "description": "sort in decreasing order of timestamp and ID, i.e., most recent first", + "type": "string", + "enum": [ + "time_and_id_descending" + ] + } + ] + }, + "IdSortMode": { + "description": "Supported set of sort modes for scanning by id only.\n\nCurrently, we only support scanning in ascending order.", + "oneOf": [ + { + "description": "sort in increasing order of \"id\"", + "type": "string", + "enum": [ + "id_ascending" + ] + } + ] + }, + "SystemMetricName": { + "type": "string", + "enum": [ + "virtual_disk_space_provisioned", + "cpus_provisioned", + "ram_provisioned" + ] + }, + "PaginationOrder": { + "description": "The order in which the client wants to page through the requested collection", + "type": "string", + "enum": [ + "ascending", + "descending" + ] + }, + "VersionSortMode": { + "description": "Supported sort modes when scanning by semantic version", + "oneOf": [ + { + "description": "Sort in increasing semantic version order (oldest first)", + "type": "string", + "enum": [ + "version_ascending" + ] + }, + { + "description": "Sort in decreasing semantic version order (newest first)", + "type": "string", + "enum": [ + "version_descending" + ] + } + ] + }, + "NameSortMode": { + "description": "Supported set of sort modes for scanning by name only\n\nCurrently, we only support scanning in ascending order.", + "oneOf": [ + { + "description": "sort in increasing order of \"name\"", + "type": "string", + "enum": [ + "name_ascending" + ] + } + ] + } + }, + "responses": { + "Error": { + 
"description": "Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + }, + "tags": [ + { + "name": "affinity", + "description": "Anti-affinity groups give control over instance placement.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/affinity" + } + }, + { + "name": "console-auth", + "description": "API for console authentication", + "externalDocs": { + "url": "http://docs.oxide.computer/api/console-auth" + } + }, + { + "name": "current-user", + "description": "Information pertaining to the current user.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/current-user" + } + }, + { + "name": "disks", + "description": "Virtual disks are used to store instance-local data which includes the operating system.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/disks" + } + }, + { + "name": "experimental", + "description": "Experimental, unstable interfaces, primarily for use by Oxide personnel", + "externalDocs": { + "url": "http://docs.oxide.computer/api/experimental" + } + }, + { + "name": "floating-ips", + "description": "Floating IPs allow a project to allocate well-known IPs to instances.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/floating-ips" + } + }, + { + "name": "images", + "description": "Images are read-only virtual disks that may be used to boot virtual machines.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/images" + } + }, + { + "name": "instances", + "description": "Virtual machine instances are the basic unit of computation. 
These operations are used for provisioning, controlling, and destroying instances.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/instances" + } + }, + { + "name": "login", + "description": "Authentication endpoints", + "externalDocs": { + "url": "http://docs.oxide.computer/api/login" + } + }, + { + "name": "metrics", + "description": "Silo-scoped metrics", + "externalDocs": { + "url": "http://docs.oxide.computer/api/metrics" + } + }, + { + "name": "multicast-groups", + "description": "Multicast groups provide efficient one-to-many network communication.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/multicast-groups" + } + }, + { + "name": "policy", + "description": "System-wide IAM policy", + "externalDocs": { + "url": "http://docs.oxide.computer/api/policy" + } + }, + { + "name": "projects", + "description": "Projects are a grouping of associated resources such as instances and disks within a silo for purposes of billing and access control.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/projects" + } + }, + { + "name": "silos", + "description": "Silos represent a logical partition of users and resources.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/silos" + } + }, + { + "name": "snapshots", + "description": "Snapshots of virtual disks at a particular point in time.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/snapshots" + } + }, + { + "name": "system/alerts", + "description": "Alerts deliver notifications for events that occur on the Oxide rack", + "externalDocs": { + "url": "http://docs.oxide.computer/api/alerts" + } + }, + { + "name": "system/audit-log", + "description": "These endpoints relate to audit logs.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-audit-log" + } + }, + { + "name": "system/hardware", + "description": "These operations pertain to hardware inventory and management. Racks are the unit of expansion of an Oxide deployment. 
Racks are in turn composed of sleds, switches, power supplies, and a cabled backplane.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-hardware" + } + }, + { + "name": "system/ip-pools", + "description": "IP pools are collections of external IPs that can be assigned to silos. When a pool is linked to a silo, users in that silo can allocate IPs from the pool for their instances.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-ip-pools" + } + }, + { + "name": "system/metrics", + "description": "Metrics provide insight into the operation of the Oxide deployment. These include telemetry on hardware and software components that can be used to understand the current state as well as to diagnose issues.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-metrics" + } + }, + { + "name": "system/networking", + "description": "This provides rack-level network configuration.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-networking" + } + }, + { + "name": "system/probes", + "description": "Probes for testing network connectivity", + "externalDocs": { + "url": "http://docs.oxide.computer/api/probes" + } + }, + { + "name": "system/silos", + "description": "Silos represent a logical partition of users and resources.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-silos" + } + }, + { + "name": "system/status", + "description": "Endpoints related to system health", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-status" + } + }, + { + "name": "system/update", + "description": "Upload and manage system updates", + "externalDocs": { + "url": "http://docs.oxide.computer/api/system-update" + } + }, + { + "name": "tokens", + "description": "API clients use device access tokens for authentication.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/tokens" + } + }, + { + "name": "vpcs", + "description": "Virtual Private Clouds (VPCs) provide isolated 
network environments for managing and deploying services.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/vpcs" + } + } + ] +} diff --git a/openapi/nexus/nexus-latest.json b/openapi/nexus/nexus-latest.json index e9892fb0306..8aab5651cba 120000 --- a/openapi/nexus/nexus-latest.json +++ b/openapi/nexus/nexus-latest.json @@ -1 +1 @@ -nexus-2025120300.0.0-dfe193.json \ No newline at end of file +nexus-2025120500.0.0-c28237.json \ No newline at end of file diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 69d43b7e3be..f90302c0755 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -7158,7 +7158,11 @@ CREATE TABLE IF NOT EXISTS omicron.public.multicast_group_member ( /* Sync versioning */ version_added INT8 NOT NULL DEFAULT nextval('omicron.public.multicast_group_version'), - version_removed INT8 + version_removed INT8, + + /* Denormalized multicast IP from the group (for API convenience) */ + /* Note: Column added via migration, must be at end for schema compatibility */ + multicast_ip INET NOT NULL ); /* External Multicast Group Indexes */ @@ -7204,25 +7208,28 @@ CREATE INDEX IF NOT EXISTS external_multicast_by_underlay ON omicron.public.mult underlay_group_id ) WHERE time_deleted IS NULL AND underlay_group_id IS NOT NULL; --- State-based filtering for RPW reconciler --- Supports: SELECT ... WHERE state = ? AND time_deleted IS NULL -CREATE INDEX IF NOT EXISTS multicast_group_by_state ON omicron.public.multicast_group ( - state -) WHERE time_deleted IS NULL; - --- RPW reconciler composite queries (state + pool filtering) --- Supports: SELECT ... WHERE state = ? AND ip_pool_id = ? AND time_deleted IS NULL -CREATE INDEX IF NOT EXISTS multicast_group_reconciler_query ON omicron.public.multicast_group ( - state, - ip_pool_id -) WHERE time_deleted IS NULL; - -- Fleet-wide unique name constraint (groups are fleet-scoped like IP pools) -- Supports: SELECT ... WHERE name = ? 
AND time_deleted IS NULL CREATE UNIQUE INDEX IF NOT EXISTS lookup_multicast_group_by_name ON omicron.public.multicast_group ( name ) WHERE time_deleted IS NULL; +-- RPW cleanup of soft-deleted groups +-- Supports: SELECT ... WHERE state = 'deleting' (includes rows with time_deleted set) +-- Without WHERE clause to allow queries on Deleting state regardless of time_deleted +CREATE INDEX IF NOT EXISTS multicast_group_cleanup ON omicron.public.multicast_group ( + state, + id +); + +-- RPW queries for active groups (Creating, Active states) +-- Supports: SELECT ... WHERE state = ? AND time_deleted IS NULL ORDER BY id +-- Optimizes the common case of querying non-deleted groups by state with pagination +CREATE INDEX IF NOT EXISTS multicast_group_active ON omicron.public.multicast_group ( + state, + id +) WHERE time_deleted IS NULL; + /* Underlay Multicast Group Indexes */ -- Version tracking for Omicron internal change detection @@ -7390,7 +7397,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '212.0.0', NULL) + (TRUE, NOW(), NOW(), '213.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/multicast-member-ip-and-indexes/up01.sql b/schema/crdb/multicast-member-ip-and-indexes/up01.sql new file mode 100644 index 00000000000..f8a845f4080 --- /dev/null +++ b/schema/crdb/multicast-member-ip-and-indexes/up01.sql @@ -0,0 +1,34 @@ +-- Add multicast_ip column to multicast_group_member and update indexes +-- +-- This migration: +-- 1. Drops redundant/unused indexes on multicast_group +-- 2. Creates optimized indexes for RPW reconciler queries +-- 3. 
Adds multicast_ip to multicast_group_member (denormalized for API responses) + +-- Drop redundant indexes +-- multicast_group_by_state: replaced by multicast_group_active (supports pagination) +DROP INDEX IF EXISTS omicron.public.multicast_group_by_state; + +-- multicast_group_reconciler_query: unused (no queries filter by state + ip_pool_id) +DROP INDEX IF EXISTS omicron.public.multicast_group_reconciler_query; + +-- RPW cleanup of soft-deleted groups +-- Supports: SELECT ... WHERE state = 'deleting' (includes rows with time_deleted set) +-- Without WHERE clause to allow queries on Deleting state regardless of time_deleted +CREATE INDEX IF NOT EXISTS multicast_group_cleanup ON omicron.public.multicast_group ( + state, + id +); + +-- RPW queries for active groups (Creating, Active states) +-- Supports: SELECT ... WHERE state = ? AND time_deleted IS NULL ORDER BY id +-- Optimizes the common case of querying non-deleted groups by state with pagination +CREATE INDEX IF NOT EXISTS multicast_group_active ON omicron.public.multicast_group ( + state, + id +) WHERE time_deleted IS NULL; + +-- Denormalized multicast IP from the group (for API convenience) +-- Note: Column added via migration, must be at end for schema compatibility +ALTER TABLE omicron.public.multicast_group_member + ADD COLUMN IF NOT EXISTS multicast_ip INET NOT NULL;