diff --git a/omicron-common/src/api/external/mod.rs b/omicron-common/src/api/external/mod.rs
index 816166b702..aa05f97606 100644
--- a/omicron-common/src/api/external/mod.rs
+++ b/omicron-common/src/api/external/mod.rs
@@ -459,7 +459,7 @@ impl Display for ResourceType {
 pub async fn to_list<T, U>(object_stream: ObjectStream<T>) -> Vec<U>
 where
-    U: From<T>,
+    T: Into<U>,
 {
     object_stream
         .filter(|maybe_object| ready(maybe_object.is_ok()))
@@ -532,12 +532,6 @@ pub struct ProjectView {
     pub identity: IdentityMetadata,
 }
 
-impl From<crate::api::internal::nexus::Project> for ProjectView {
-    fn from(project: crate::api::internal::nexus::Project) -> Self {
-        ProjectView { identity: project.identity }
-    }
-}
-
 /**
  * Create-time parameters for an [`Project`]
  */
@@ -728,19 +722,6 @@ pub struct InstanceView {
     pub runtime: InstanceRuntimeStateView,
 }
 
-impl From<crate::api::internal::nexus::Instance> for InstanceView {
-    fn from(instance: crate::api::internal::nexus::Instance) -> Self {
-        InstanceView {
-            identity: instance.identity.clone(),
-            project_id: instance.project_id,
-            ncpus: instance.ncpus,
-            memory: instance.memory,
-            hostname: instance.hostname.clone(),
-            runtime: instance.runtime.into(),
-        }
-    }
-}
-
 /**
  * Create-time parameters for an [`Instance`]
  */
@@ -788,24 +769,6 @@ pub struct DiskView {
     pub device_path: String,
 }
 
-impl From<crate::api::internal::nexus::Disk> for DiskView {
-    fn from(disk: crate::api::internal::nexus::Disk) -> Self {
-        /*
-         * TODO-correctness: can the name always be used as a path like this
-         * or might it need to be sanitized?
-         */
-        let device_path = format!("/mnt/{}", disk.identity.name.as_str());
-        DiskView {
-            identity: disk.identity.clone(),
-            project_id: disk.project_id,
-            snapshot_id: disk.create_snapshot_id,
-            size: disk.size,
-            state: disk.runtime.disk_state,
-            device_path,
-        }
-    }
-}
-
 /**
  * State of a Disk (primarily: attached or not)
  */
@@ -948,12 +911,6 @@ pub struct RackView {
     pub identity: IdentityMetadata,
 }
 
-impl From<crate::api::internal::nexus::Rack> for RackView {
-    fn from(rack: crate::api::internal::nexus::Rack) -> Self {
-        RackView { identity: rack.identity }
-    }
-}
-
 /*
  * SLEDS
  */
@@ -969,15 +926,6 @@ pub struct SledView {
     pub service_address: SocketAddr,
 }
 
-impl From<crate::api::internal::nexus::Sled> for SledView {
-    fn from(sled: crate::api::internal::nexus::Sled) -> Self {
-        SledView {
-            identity: sled.identity.clone(),
-            service_address: sled.service_address,
-        }
-    }
-}
-
 /*
  * Sagas
  *
diff --git a/omicron-common/src/api/internal/nexus.rs b/omicron-common/src/api/internal/nexus.rs
index 5da8925798..ecae30ec6d 100644
--- a/omicron-common/src/api/internal/nexus.rs
+++ b/omicron-common/src/api/internal/nexus.rs
@@ -1,9 +1,6 @@
 //! APIs exposed by Nexus.
 
-use crate::api::external::{
-    ByteCount, DiskState, Generation, IdentityMetadata, InstanceCpuCount,
-    InstanceState,
-};
+use crate::api::external::{DiskState, Generation, InstanceState};
 use chrono::{DateTime, Utc};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -11,37 +8,6 @@ use std::net::SocketAddr;
 use std::time::Duration;
 use uuid::Uuid;
 
-pub struct Rack {
-    pub identity: IdentityMetadata,
-}
-
-pub struct Sled {
-    pub identity: IdentityMetadata,
-    pub service_address: SocketAddr,
-}
-
-/// A collection of associated resources.
-pub struct Project {
-    /// common identifying metadata.
-    pub identity: IdentityMetadata,
-}
-
-/// A Disk (network block device).
-#[derive(Clone, Debug)]
-pub struct Disk {
-    /// common identifying metadata.
-    pub identity: IdentityMetadata,
-    /// id for the project containing this Disk
-    pub project_id: Uuid,
-    /// id for the snapshot from which this Disk was created (None means a blank
-    /// disk)
-    pub create_snapshot_id: Option<Uuid>,
-    /// size of the Disk
-    pub size: ByteCount,
-    /// runtime state of the Disk
-    pub runtime: DiskRuntimeState,
-}
-
 /// Runtime state of the Disk, which includes its attach state and some minimal
 /// metadata
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
@@ -54,28 +20,6 @@ pub struct DiskRuntimeState {
     pub time_updated: DateTime<Utc>,
 }
 
-/// An Instance (VM).
-#[derive(Clone, Debug)]
-pub struct Instance {
-    /// common identifying metadata
-    pub identity: IdentityMetadata,
-
-    /// id for the project containing this Instance
-    pub project_id: Uuid,
-
-    /// number of CPUs allocated for this Instance
-    pub ncpus: InstanceCpuCount,
-    /// memory allocated for this Instance
-    pub memory: ByteCount,
-    /// RFC1035-compliant hostname for the Instance.
-    // TODO-cleanup different type?
-    pub hostname: String,
-
-    /// state owned by the data plane
-    pub runtime: InstanceRuntimeState,
-    // TODO-completeness: add disks, network, tags, metrics
-}
-
 /// Runtime state of the Instance, including the actual running state and minimal
 /// metadata
 ///
@@ -129,11 +73,3 @@ pub struct OximeterInfo {
     /// The address on which this oximeter instance listens for requests
     pub address: SocketAddr,
 }
-
-/// An assignment of an Oximeter instance to a metric producer for collection.
-// TODO: Goes to storage
-#[derive(Debug, Clone, Copy, JsonSchema, Serialize, Deserialize)]
-pub struct OximeterAssignment {
-    pub oximeter_id: Uuid,
-    pub producer_id: Uuid,
-}
diff --git a/omicron-common/src/api/internal/sled_agent.rs b/omicron-common/src/api/internal/sled_agent.rs
index 69fd9f6bc9..27653fdde8 100644
--- a/omicron-common/src/api/internal/sled_agent.rs
+++ b/omicron-common/src/api/internal/sled_agent.rs
@@ -39,12 +39,15 @@ impl DiskStateRequested {
     }
 }
 
+/// Runtime state of an instance.
+pub type InstanceRuntimeState = internal::nexus::InstanceRuntimeState;
+
 /// Sent to a sled agent to establish the runtime state of an Instance
 #[derive(Serialize, Deserialize, JsonSchema)]
 pub struct InstanceEnsureBody {
     /// Last runtime state of the Instance known to Nexus (used if the agent
     /// has never seen this Instance before).
-    pub initial_runtime: internal::nexus::InstanceRuntimeState,
+    pub initial_runtime: InstanceRuntimeState,
     /// requested runtime state of the Instance
     pub target: InstanceRuntimeStateRequested,
 }
diff --git a/omicron-common/src/model_db.rs b/omicron-common/src/model_db.rs
index 431d8db40c..6786af4b4f 100644
--- a/omicron-common/src/model_db.rs
+++ b/omicron-common/src/model_db.rs
@@ -1,44 +1,15 @@
 /*!
  * Facilities for mapping model Rust types to and from database types
  *
- * We'd prefer to put this in omicron-nexus, but the Rust orphan rules prevent
- * that.
- *
- * There are essentially two patterns used for database conversions:
- *
- * (1) For Rust types that map directly to database types, we impl
- * tokio_postgres's [`tokio_postgres::types::ToSql`] and
- * [`tokio_postgres::types::FromSql`] traits.  For the most part, these are
- * newtypes in Rust that wrap a type for which there is already an impl for
- * these traits and we delegate to those impls where possible.  For example,
- * [`ByteCount`] is a numeric newtype containing a u64, which maps
- * directly to a CockroachDB (PostgreSQL) `int`, which is essentially an
- * `i64`.  The `ToSql` and `FromSql` impls for `ByteCount` delegate to
- * the existing impls for `i64`.
- *
- * (2) For Rust types that require multiple database values (e.g., an
- * [`Disk`], which represents an entire row from the Disk table, we impl
- * `TryFrom<&tokio_postgres::Row>`.  The impl pulls multiple values out of
- * the row, generally using the names of columns from the table.  We also
- * impl `SqlSerialize`, which records key-value pairs that can be safely
- * inserted into SQL "UPDATE" and "INSERT" statements.
- *
- * These often combine to form a hierarchy.  For example, to load an
- * [`Project`] from a row from the Project table:
- *
- * * We start with `Project::try_from(row: &tokio_postgres::Row)`.
- * * The main field of an `Project` is its `IdentityMetadata`, so
- *   `Project::try_from` invokes `IdentityMetadata::try_from(row)` with
- *   the same `row` argument.
- * * `IdentityMetadata` pulls out the fields that it knows about, including
- *   `id` and `name`.
- * * `name` is an [`Name`], which is a newtype that wraps a Rust `String`.
- *   This has a `FromSql` impl.
- *
- * The ToSql/FromSql, and TryFrom impls are in this file because the Rust orphan
- * rules prevent them from going in omicron_nexus.  We're not so restricted on
- * the SqlSerialize interface, and we don't want to have to pull in the other
- * facilities that those impls depend on, so those remain in omicron_nexus.
+ * For Rust types that map directly to database types, we impl
+ * tokio_postgres's [`tokio_postgres::types::ToSql`] and
+ * [`tokio_postgres::types::FromSql`] traits.  For the most part, these are
+ * newtypes in Rust that wrap a type for which there is already an impl for
+ * these traits and we delegate to those impls where possible.  For example,
+ * [`ByteCount`] is a numeric newtype containing a u64, which maps
+ * directly to a CockroachDB (PostgreSQL) `int`, which is essentially an
+ * `i64`.  The `ToSql` and `FromSql` impls for `ByteCount` delegate to
+ * the existing impls for `i64`.
  */
 /*
  * TODO-cleanup We could potentially derive these TryFrom impls.  Diesel and
@@ -49,37 +20,13 @@
  * TODO-coverage tests for these FromSql and ToSql implementations
  */
 
-use std::net::{IpAddr, SocketAddr};
-use std::time::Duration;
-
-use super::db::sql_row_value;
 use crate::api::external::ByteCount;
-use crate::api::external::DiskAttachment;
-use crate::api::external::DiskState;
-use crate::api::external::Error;
 use crate::api::external::Generation;
-use crate::api::external::IdentityMetadata;
 use crate::api::external::InstanceCpuCount;
-use crate::api::external::InstanceState;
 use crate::api::external::MacAddr;
 use crate::api::external::Name;
-use crate::api::external::NetworkInterface;
-use crate::api::external::Vpc;
-use crate::api::external::VpcSubnet;
 use crate::api::external::{Ipv4Net, Ipv6Net};
-use crate::api::internal::nexus::Disk;
-use crate::api::internal::nexus::DiskRuntimeState;
-use crate::api::internal::nexus::Instance;
-use crate::api::internal::nexus::InstanceRuntimeState;
-use crate::api::internal::nexus::OximeterAssignment;
-use crate::api::internal::nexus::OximeterInfo;
-use crate::api::internal::nexus::ProducerEndpoint;
-use crate::api::internal::nexus::Project;
-use crate::bail_unless;
-use chrono::DateTime;
-use chrono::Utc;
 use std::convert::TryFrom;
-use uuid::Uuid;
 
 /*
  * FromSql/ToSql impls used for simple Rust types
@@ -246,237 +193,6 @@ impl TryFrom<String> for MacAddr {
 }
 impl_sql_wrapping!(MacAddr, String);
 
-/*
- * TryFrom impls used for more complex Rust types
- */
-
-/// Load an [`IdentityMetadata`] from a row of any table that contains the
-/// usual identity fields: "id", "name", "description, "time_created", and
-/// "time_modified".
-impl TryFrom<&tokio_postgres::Row> for IdentityMetadata {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let time_deleted: Option<DateTime<Utc>> =
-            sql_row_value(value, "time_deleted")?;
-        /*
-         * We could support representing deleted objects, but we would want to
-         * think about how to do that.  For example, we might want to use
-         * separate types so that the control plane can't accidentally do things
-         * like attach a disk to a deleted Instance.  We haven't figured any of
-         * this out, and there's no need yet.
-         */
-        bail_unless!(
-            time_deleted.is_none(),
-            "model does not support objects that have been deleted"
-        );
-        Ok(IdentityMetadata {
-            id: sql_row_value(value, "id")?,
-            name: sql_row_value(value, "name")?,
-            description: sql_row_value(value, "description")?,
-            time_created: sql_row_value(value, "time_created")?,
-            time_modified: sql_row_value(value, "time_modified")?,
-        })
-    }
-}
-
-impl TryFrom<&tokio_postgres::Row> for InstanceState {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let variant: &str = sql_row_value(value, "instance_state")?;
-        InstanceState::try_from(variant)
-            .map_err(|err| Error::InternalError { message: err })
-    }
-}
-
-/// Load an [`Project`] from a whole row of the "Project" table.
-impl TryFrom<&tokio_postgres::Row> for Project {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Project { identity: IdentityMetadata::try_from(value)? })
-    }
-}
-
-/// Load an [`Instance`] from a whole row of the "Instance" table.
-impl TryFrom<&tokio_postgres::Row> for Instance {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Instance {
-            identity: IdentityMetadata::try_from(value)?,
-            project_id: sql_row_value(value, "project_id")?,
-            ncpus: sql_row_value(value, "ncpus")?,
-            memory: sql_row_value(value, "memory")?,
-            hostname: sql_row_value(value, "hostname")?,
-            runtime: InstanceRuntimeState::try_from(value)?,
-        })
-    }
-}
-
-/// Load an [`InstanceRuntimeState`] from a row of the "Instance" table,
-/// using the "instance_state", "active_server_id", "state_generation", and
-/// "time_state_updated" columns.
-impl TryFrom<&tokio_postgres::Row> for InstanceRuntimeState {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(InstanceRuntimeState {
-            run_state: InstanceState::try_from(value)?,
-            sled_uuid: sql_row_value(value, "active_server_id")?,
-            gen: sql_row_value(value, "state_generation")?,
-            time_updated: sql_row_value(value, "time_state_updated")?,
-        })
-    }
-}
-
-/// Load an [`Disk`] from a row of the "Disk" table.
-impl TryFrom<&tokio_postgres::Row> for Disk {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Disk {
-            identity: IdentityMetadata::try_from(value)?,
-            project_id: sql_row_value(value, "project_id")?,
-            create_snapshot_id: sql_row_value(value, "origin_snapshot")?,
-            size: sql_row_value(value, "size_bytes")?,
-            runtime: DiskRuntimeState::try_from(value)?,
-        })
-    }
-}
-
-/// Load an [`DiskAttachment`] from a database row containing those columns
-/// of the Disk table that describe the attachment: "id", "name", "disk_state",
-/// "attach_instance_id"
-impl TryFrom<&tokio_postgres::Row> for DiskAttachment {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(DiskAttachment {
-            instance_id: sql_row_value(value, "attach_instance_id")?,
-            disk_id: sql_row_value(value, "id")?,
-            disk_name: sql_row_value(value, "name")?,
-            disk_state: DiskState::try_from(value)?,
-        })
-    }
-}
-
-/// Load an [`DiskRuntimeState`'] from a row from the Disk table, using the
-/// columns needed for [`DiskState`], plus "state_generation" and
-/// "time_state_updated".
-impl TryFrom<&tokio_postgres::Row> for DiskRuntimeState {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(DiskRuntimeState {
-            disk_state: DiskState::try_from(value)?,
-            gen: sql_row_value(value, "state_generation")?,
-            time_updated: sql_row_value(value, "time_state_updated")?,
-        })
-    }
-}
-
-/// Load an [`DiskState`] from a row from the Disk table, using the columns
-/// "disk_state" and "attach_instance_id".
-impl TryFrom<&tokio_postgres::Row> for DiskState {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let disk_state_str: &str = sql_row_value(value, "disk_state")?;
-        let instance_uuid: Option<Uuid> =
-            sql_row_value(value, "attach_instance_id")?;
-        DiskState::try_from((disk_state_str, instance_uuid))
-            .map_err(|e| Error::internal_error(&e))
-    }
-}
-
-/// Load a [`ProducerEndpoint`] from a row in the `MetricProducer` table, using
-/// the columns "id", "ip", "port", "interval", and "route"
-impl TryFrom<&tokio_postgres::Row> for ProducerEndpoint {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let id: Uuid = sql_row_value(value, "id")?;
-        let ip: IpAddr = sql_row_value(value, "ip")?;
-        let port: i32 = sql_row_value(value, "port")?;
-        let address = SocketAddr::new(ip, port as _);
-        let base_route: String = sql_row_value(value, "route")?;
-        let interval =
-            Duration::from_secs_f64(sql_row_value(value, "interval")?);
-        Ok(Self { id, address, base_route, interval })
-    }
-}
-
-/// Load an [`OximeterInfo`] from a row in the `Oximeter` table, using the
-/// columns "id", "ip", and "port".
-impl TryFrom<&tokio_postgres::Row> for OximeterInfo {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let collector_id: Uuid = sql_row_value(value, "id")?;
-        let ip: IpAddr = sql_row_value(value, "ip")?;
-        let port: i32 = sql_row_value(value, "port")?;
-        let address = SocketAddr::new(ip, port as _);
-        Ok(Self { collector_id, address })
-    }
-}
-
-/// Load an [`OximeterAssignment`] from a row in the `OximeterAssignment`
-/// table, using the columns "oximeter_id" and "producer_id"
-impl TryFrom<&tokio_postgres::Row> for OximeterAssignment {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        let oximeter_id: Uuid = sql_row_value(value, "oximeter_id")?;
-        let producer_id: Uuid = sql_row_value(value, "producer_id")?;
-        Ok(Self { oximeter_id, producer_id })
-    }
-}
-
-/// Load an [`Vpc`] from a row in the `Vpc` table.
-impl TryFrom<&tokio_postgres::Row> for Vpc {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Self {
-            identity: IdentityMetadata::try_from(value)?,
-            project_id: sql_row_value(value, "project_id")?,
-            dns_name: sql_row_value(value, "dns_name")?,
-            vpc_subnets: vec![],
-        })
-    }
-}
-
-/// Load a [`VpcSubnet`] from a row in the `VpcSubnet` table.
-impl TryFrom<&tokio_postgres::Row> for VpcSubnet {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Self {
-            identity: IdentityMetadata::try_from(value)?,
-            vpc_id: sql_row_value(value, "vpc_id")?,
-            ipv4_block: sql_row_value(value, "ipv4_block")?,
-            ipv6_block: sql_row_value(value, "ipv6_block")?,
-        })
-    }
-}
-
-/// Load a [`NetworkInterface`] from a row in the `NetworkInterface` table.
-impl TryFrom<&tokio_postgres::Row> for NetworkInterface {
-    type Error = Error;
-
-    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
-        Ok(Self {
-            identity: IdentityMetadata::try_from(value)?,
-            vpc_id: sql_row_value(value, "vpc_id")?,
-            subnet_id: sql_row_value(value, "subnet_id")?,
-            mac: sql_row_value(value, "mac")?,
-            ip: sql_row_value(value, "ip")?,
-        })
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/omicron-common/src/sled_agent_client.rs b/omicron-common/src/sled_agent_client.rs
index d2a7cb1a2e..969a4c8e4f 100644
--- a/omicron-common/src/sled_agent_client.rs
+++ b/omicron-common/src/sled_agent_client.rs
@@ -5,13 +5,12 @@
  * generated by the server.
  */
 
+use crate::api;
 use crate::api::external::Error;
 use crate::api::internal::nexus::DiskRuntimeState;
-use crate::api::internal::nexus::InstanceRuntimeState;
 use crate::api::internal::sled_agent::DiskEnsureBody;
 use crate::api::internal::sled_agent::DiskStateRequested;
 use crate::api::internal::sled_agent::InstanceEnsureBody;
-use crate::api::internal::sled_agent::InstanceRuntimeStateRequested;
 use crate::http_client::HttpClient;
 use async_trait::async_trait;
 use http::Method;
@@ -54,9 +53,9 @@ impl Client {
     pub async fn instance_ensure(
         self: &Arc<Self>,
         instance_id: Uuid,
-        initial_runtime: InstanceRuntimeState,
-        target: InstanceRuntimeStateRequested,
-    ) -> Result<InstanceRuntimeState, Error> {
+        initial_runtime: api::internal::sled_agent::InstanceRuntimeState,
+        target: api::internal::sled_agent::InstanceRuntimeStateRequested,
+    ) -> Result<api::internal::sled_agent::InstanceRuntimeState, Error> {
         let path = format!("/instances/{}", instance_id);
         let body = Body::from(
             serde_json::to_string(&InstanceEnsureBody {
@@ -71,7 +70,7 @@ impl Client {
         assert!(response.status().is_success());
         let value = self
             .client
-            .read_json::<InstanceRuntimeState>(
+            .read_json::<api::internal::sled_agent::InstanceRuntimeState>(
                 &self.client.error_message_base(&Method::PUT, path.as_str()),
                 &mut response,
             )
diff --git a/omicron-nexus/src/db/conversions.rs b/omicron-nexus/src/db/conversions.rs
deleted file mode 100644
index cbc811dd0b..0000000000
--- a/omicron-nexus/src/db/conversions.rs
+++ /dev/null
@@ -1,117 +0,0 @@
-/*!
- * Facilities for mapping Rust types to database types
- *
- * See omicron-common/src/model_db.rs.
- */
-
-use chrono::DateTime;
-use chrono::Utc;
-use omicron_common::api::external::DiskCreateParams;
-use omicron_common::api::external::DiskState;
-use omicron_common::api::external::IdentityMetadataCreateParams;
-use omicron_common::api::external::InstanceCreateParams;
-use omicron_common::api::external::InstanceState;
-use omicron_common::api::external::ProjectCreateParams;
-use omicron_common::api::external::VpcCreateParams;
-use omicron_common::api::internal::nexus::DiskRuntimeState;
-use omicron_common::api::internal::nexus::InstanceRuntimeState;
-use omicron_common::api::internal::nexus::OximeterAssignment;
-use omicron_common::api::internal::nexus::OximeterInfo;
-use omicron_common::api::internal::nexus::ProducerEndpoint;
-
-use super::sql::SqlSerialize;
-use super::sql::SqlValueSet;
-
-impl SqlSerialize for IdentityMetadataCreateParams {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        output.set("name", &self.name);
-        output.set("description", &self.description);
-        output.set("time_deleted", &(None as Option<DateTime<Utc>>));
-    }
-}
-
-impl SqlSerialize for ProjectCreateParams {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.identity.sql_serialize(output)
-    }
-}
-
-impl SqlSerialize for InstanceCreateParams {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.identity.sql_serialize(output);
-        output.set("ncpus", &self.ncpus);
-        output.set("memory", &self.memory);
-        output.set("hostname", &self.hostname);
-    }
-}
-
-impl SqlSerialize for InstanceState {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        output.set("instance_state", &self.label());
-    }
-}
-
-impl SqlSerialize for InstanceRuntimeState {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.run_state.sql_serialize(output);
-        output.set("active_server_id", &self.sled_uuid);
-        output.set("state_generation", &self.gen);
-        output.set("time_state_updated", &self.time_updated);
-    }
-}
-
-impl SqlSerialize for DiskCreateParams {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.identity.sql_serialize(output);
-        output.set("size_bytes", &self.size);
-        output.set("origin_snapshot", &self.snapshot_id);
-    }
-}
-
-impl SqlSerialize for DiskRuntimeState {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.disk_state.sql_serialize(output);
-        output.set("state_generation", &self.gen);
-        output.set("time_state_updated", &self.time_updated);
-    }
-}
-
-impl SqlSerialize for DiskState {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        let attach_id = &self.attached_instance_id().map(|id| *id);
-        output.set("attach_instance_id", attach_id);
-        output.set("disk_state", &self.label());
-    }
-}
-
-impl SqlSerialize for VpcCreateParams {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        self.identity.sql_serialize(output);
-        output.set("dns_name", &self.dns_name);
-    }
-}
-
-impl SqlSerialize for OximeterInfo {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        output.set("id", &self.collector_id);
-        output.set("ip", &self.address.ip());
-        output.set("port", &i32::from(self.address.port()));
-    }
-}
-
-impl SqlSerialize for ProducerEndpoint {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        output.set("id", &self.id);
-        output.set("ip", &self.address.ip());
-        output.set("port", &i32::from(self.address.port()));
-        output.set("interval", &self.interval.as_secs_f64());
-        output.set("route", &self.base_route);
-    }
-}
-
-impl SqlSerialize for OximeterAssignment {
-    fn sql_serialize(&self, output: &mut SqlValueSet) {
-        output.set("oximeter_id", &self.oximeter_id);
-        output.set("producer_id", &self.producer_id);
-    }
-}
diff --git a/omicron-nexus/src/db/datastore.rs b/omicron-nexus/src/db/datastore.rs
index fe9a549663..5540f283f0 100644
--- a/omicron-nexus/src/db/datastore.rs
+++ b/omicron-nexus/src/db/datastore.rs
@@ -31,9 +31,6 @@ use omicron_common::api::external::LookupResult;
 use omicron_common::api::external::Name;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::external::UpdateResult;
-use omicron_common::api::internal::nexus::OximeterAssignment;
-use omicron_common::api::internal::nexus::OximeterInfo;
-use omicron_common::api::internal::nexus::ProducerEndpoint;
 use omicron_common::bail_unless;
 use omicron_common::db::sql_row_value;
 use std::convert::TryFrom;
@@ -76,31 +73,22 @@ impl DataStore {
     }
 
     /// Create a project
-    pub async fn project_create_with_id(
+    pub async fn project_create(
        &self,
-        new_id: &Uuid,
-        new_project: &api::external::ProjectCreateParams,
-    ) -> CreateResult<api::internal::nexus::Project> {
+        new_project: &db::model::Project,
+    ) -> CreateResult<db::model::Project> {
         let client = self.pool.acquire().await?;
 
-        let now = Utc::now();
         let mut values = SqlValueSet::new();
-        values.set("id", new_id);
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
         new_project.sql_serialize(&mut values);
-        sql_insert_unique::<Project>(
-            &client,
-            &values,
-            &new_project.identity.name.as_str(),
-        )
-        .await
+        sql_insert_unique::<Project>(&client, &values, &new_project.name())
+            .await
     }
 
     /// Fetch metadata for a project
     pub async fn project_fetch(
         &self,
         project_name: &Name,
-    ) -> LookupResult<api::internal::nexus::Project> {
+    ) -> LookupResult<db::model::Project> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueName, Project>(
             &client,
@@ -155,7 +143,7 @@ impl DataStore {
     pub async fn projects_list_by_id(
         &self,
         pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResult<api::internal::nexus::Project> {
+    ) -> ListResult<db::model::Project> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_from_table::<LookupByUniqueId, Project>(
             &client,
@@ -169,7 +157,7 @@ impl DataStore {
     pub async fn projects_list_by_name(
         &self,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<api::internal::nexus::Project> {
+    ) -> ListResult<db::model::Project> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_by::<
             LookupByUniqueName,
@@ -184,7 +172,7 @@ impl DataStore {
         &self,
         project_name: &Name,
         update_params: &api::external::ProjectUpdateParams,
-    ) -> UpdateResult<api::internal::nexus::Project> {
+    ) -> UpdateResult<db::model::Project> {
         let client = self.pool.acquire().await?;
         let now = Utc::now();
 
@@ -213,7 +201,7 @@ impl DataStore {
                 Error::not_found_by_name(ResourceType::Project, project_name)
             })
             .await?;
-        Ok(api::internal::nexus::Project::try_from(&row)?)
+        Ok(db::model::Project::try_from(&row)?)
     }
 
     /*
@@ -250,17 +238,17 @@ impl DataStore {
         instance_id: &Uuid,
         project_id: &Uuid,
         params: &api::external::InstanceCreateParams,
-        runtime_initial: &api::internal::nexus::InstanceRuntimeState,
-    ) -> CreateResult<api::internal::nexus::Instance> {
+        runtime_initial: &db::model::InstanceRuntimeState,
+    ) -> CreateResult<db::model::Instance> {
         let client = self.pool.acquire().await?;
-        let now = runtime_initial.time_updated;
         let mut values = SqlValueSet::new();
-        values.set("id", instance_id);
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
-        values.set("project_id", project_id);
-        params.sql_serialize(&mut values);
-        runtime_initial.sql_serialize(&mut values);
+        let instance = db::model::Instance::new(
+            *instance_id,
+            *project_id,
+            params,
+            runtime_initial.clone(),
+        );
+        instance.sql_serialize(&mut values);
         let instance = sql_insert_unique_idempotent_and_fetch::<
             Instance,
             LookupByUniqueId,
@@ -275,8 +263,8 @@ impl DataStore {
         .await?;
 
         bail_unless!(
-            instance.runtime.run_state
-                == api::external::InstanceState::Creating,
+            instance.runtime.run_state.state()
+                == &api::external::InstanceState::Creating,
             "newly-created Instance has unexpected state: {:?}",
             instance.runtime.run_state
         );
@@ -292,7 +280,7 @@ impl DataStore {
         &self,
         project_id: &Uuid,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<api::internal::nexus::Instance> {
+    ) -> ListResult<db::model::Instance> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_by::<
             LookupByUniqueNameInProject,
@@ -305,7 +293,7 @@ impl DataStore {
     pub async fn instance_fetch(
         &self,
         instance_id: &Uuid,
-    ) -> LookupResult<api::internal::nexus::Instance> {
+    ) -> LookupResult<db::model::Instance> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueId, Instance>(&client, (), instance_id)
             .await
     }
@@ -315,7 +303,7 @@ pub async fn instance_fetch_by_name(
         &self,
         project_id: &Uuid,
         instance_name: &Name,
-    ) -> LookupResult<api::internal::nexus::Instance> {
+    ) -> LookupResult<db::model::Instance> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueNameInProject, Instance>(
             &client,
@@ -337,7 +325,7 @@ impl DataStore {
     pub async fn instance_update_runtime(
         &self,
         instance_id: &Uuid,
-        new_runtime: &api::internal::nexus::InstanceRuntimeState,
+        new_runtime: &db::model::InstanceRuntimeState,
     ) -> Result<bool, Error> {
         let client = self.pool.acquire().await?;
 
@@ -380,7 +368,8 @@ impl DataStore {
         let now = Utc::now();
 
         let mut values = SqlValueSet::new();
-        api::external::InstanceState::Destroyed.sql_serialize(&mut values);
+        db::model::InstanceState::new(api::external::InstanceState::Destroyed)
+            .sql_serialize(&mut values);
         values.set("time_deleted", &now);
 
         let mut cond_sql = SqlString::new();
@@ -431,12 +420,12 @@ impl DataStore {
         &self,
         instance_id: &Uuid,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<api::external::DiskAttachment> {
+    ) -> ListResult<db::model::DiskAttachment> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_by::<
             LookupByAttachedInstance,
             Disk,
-            api::external::DiskAttachment,
+            db::model::DiskAttachment,
         >(
             &client,
             (instance_id,),
@@ -451,23 +440,22 @@ impl DataStore {
         disk_id: &Uuid,
         project_id: &Uuid,
         params: &api::external::DiskCreateParams,
-        runtime_initial: &api::internal::nexus::DiskRuntimeState,
-    ) -> CreateResult<api::internal::nexus::Disk> {
+        runtime_initial: &db::model::DiskRuntimeState,
+    ) -> CreateResult<db::model::Disk> {
         /*
         * See project_create_instance() for a discussion of how this function
         * works.  The pattern here is nearly identical.
         */
         let client = self.pool.acquire().await?;
-        let now = runtime_initial.time_updated;
         let mut values = SqlValueSet::new();
-        values.set("id", disk_id);
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
-        values.set("project_id", project_id);
-        params.sql_serialize(&mut values);
-        runtime_initial.sql_serialize(&mut values);
-
-        let disk: api::internal::nexus::Disk =
+        let disk = db::model::Disk::new(
+            *disk_id,
+            *project_id,
+            params.clone(),
+            runtime_initial.clone(),
+        );
+        disk.sql_serialize(&mut values);
+        let disk =
             sql_insert_unique_idempotent_and_fetch::<Disk, LookupByUniqueId>(
                 &client,
                 &mut values,
@@ -479,7 +467,8 @@ impl DataStore {
         .await?;
 
         bail_unless!(
-            disk.runtime.disk_state == api::external::DiskState::Creating,
+            disk.runtime.disk_state.state()
+                == &api::external::DiskState::Creating,
             "newly-created Disk has unexpected state: {:?}",
             disk.runtime.disk_state
         );
@@ -495,7 +484,7 @@ impl DataStore {
         &self,
         project_id: &Uuid,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<api::internal::nexus::Disk> {
+    ) -> ListResult<db::model::Disk> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_by::<
             LookupByUniqueNameInProject,
@@ -508,7 +497,7 @@ impl DataStore {
     pub async fn disk_update_runtime(
         &self,
         disk_id: &Uuid,
-        new_runtime: &api::internal::nexus::DiskRuntimeState,
+        new_runtime: &db::model::DiskRuntimeState,
     ) -> Result<bool, Error> {
         let client = self.pool.acquire().await?;
 
@@ -537,7 +526,7 @@ impl DataStore {
     pub async fn disk_fetch(
         &self,
         disk_id: &Uuid,
-    ) -> LookupResult<api::internal::nexus::Disk> {
+    ) -> LookupResult<db::model::Disk> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueId, Disk>(&client, (), disk_id).await
     }
@@ -546,7 +535,7 @@ impl DataStore {
         &self,
         project_id: &Uuid,
         disk_name: &Name,
-    ) -> LookupResult<api::internal::nexus::Disk> {
+    ) -> LookupResult<db::model::Disk> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueNameInProject, Disk>(
             &client,
@@ -562,7 +551,8 @@ impl DataStore {
 
         let mut values = SqlValueSet::new();
         values.set("time_deleted", &now);
-        api::external::DiskState::Destroyed.sql_serialize(&mut values);
+        db::model::DiskState::new(api::external::DiskState::Destroyed)
+            .sql_serialize(&mut values);
 
         let mut cond_sql = SqlString::new();
         let disk_state_detached =
@@ -613,13 +603,10 @@ impl DataStore {
     // Create a record for a new Oximeter instance
     pub async fn oximeter_create(
         &self,
-        info: &OximeterInfo,
+        info: &db::model::OximeterInfo,
     ) -> Result<(), Error> {
         let client = self.pool.acquire().await?;
-        let now = Utc::now();
         let mut values = SqlValueSet::new();
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
         info.sql_serialize(&mut values);
         sql_insert::<Oximeter>(&client, &values).await
     }
@@ -627,13 +614,10 @@ impl DataStore {
     // Create a record for a new producer endpoint
     pub async fn producer_endpoint_create(
         &self,
-        producer: &ProducerEndpoint,
+        producer: &db::model::ProducerEndpoint,
     ) -> Result<(), Error> {
         let client = self.pool.acquire().await?;
-        let now = Utc::now();
         let mut values = SqlValueSet::new();
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
         producer.sql_serialize(&mut values);
         sql_insert::<MetricProducer>(&client, &values).await
     }
@@ -648,7 +632,7 @@ impl DataStore {
         let now = Utc::now();
         let mut values = SqlValueSet::new();
         values.set("time_created", &now);
-        let reg = OximeterAssignment { oximeter_id, producer_id };
+        let reg = db::model::OximeterAssignment { oximeter_id, producer_id };
         reg.sql_serialize(&mut values);
         sql_insert::<OximeterAssignment>(&client, &values).await
     }
@@ -737,7 +721,7 @@ impl DataStore {
         &self,
         project_id: &Uuid,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<api::external::Vpc> {
+    ) -> ListResult<db::model::Vpc> {
         let client = self.pool.acquire().await?;
         sql_fetch_page_by::<
             LookupByUniqueNameInProject,
@@ -752,15 +736,11 @@ impl DataStore {
         vpc_id: &Uuid,
         project_id: &Uuid,
         params: &api::external::VpcCreateParams,
-    ) -> Result<api::external::Vpc, Error> {
+    ) -> Result<db::model::Vpc, Error> {
         let client = self.pool.acquire().await?;
-        let now = Utc::now();
         let mut values = SqlValueSet::new();
-        values.set("id", vpc_id);
-        values.set("time_created", &now);
-        values.set("time_modified", &now);
-        values.set("project_id", project_id);
-        params.sql_serialize(&mut values);
+        let vpc = db::model::Vpc::new(*vpc_id, *project_id, params.clone());
+        vpc.sql_serialize(&mut values);
 
         sql_insert_unique_idempotent_and_fetch::<Vpc, LookupByUniqueId>(
             &client,
@@ -820,7 +800,7 @@ impl DataStore {
         &self,
         project_id: &Uuid,
         vpc_name: &Name,
-    ) -> LookupResult<api::external::Vpc> {
+    ) -> LookupResult<db::model::Vpc> {
         let client = self.pool.acquire().await?;
         sql_fetch_row_by::<LookupByUniqueNameInProject, Vpc>(
             &client,
diff --git a/omicron-nexus/src/db/mod.rs b/omicron-nexus/src/db/mod.rs
index 9bfa074f9a..b32ffc063f 100644
--- a/omicron-nexus/src/db/mod.rs
+++ b/omicron-nexus/src/db/mod.rs
@@ -3,7 +3,6 @@
  */
 
 mod config;
-mod conversions;
 mod datastore;
 mod operations;
 mod pool;
@@ -12,6 +11,7 @@ mod saga_types;
 mod sec_store;
 mod sql_operations;
 
+pub mod model;
 pub mod schema;
 pub mod sql; /* public for examples only */
diff --git a/omicron-nexus/src/db/model.rs b/omicron-nexus/src/db/model.rs
new file mode 100644
index 0000000000..383f1da299
--- /dev/null
+++ b/omicron-nexus/src/db/model.rs
@@ -0,0 +1,838 @@
+//! Structures stored to the database.
+
+use chrono::{DateTime, Utc};
+use omicron_common::api::external::{
+    self, ByteCount, Error, Generation, InstanceCpuCount,
+};
+use omicron_common::api::internal;
+use omicron_common::db::sql_row_value;
+use std::convert::TryFrom;
+use std::net::{IpAddr, SocketAddr};
+use std::time::Duration;
+use uuid::Uuid;
+
+use super::sql::SqlSerialize;
+use super::sql::SqlValueSet;
+
+// TODO: Break up types into multiple files
+
+// NOTE: This object is not currently stored in the database.
+//
+// However, it likely will be in the future - for the single-rack
+// case, however, it is synthesized.
+pub struct Rack {
+    pub identity: IdentityMetadata,
+}
+
+impl Into<external::RackView> for Rack {
+    fn into(self) -> external::RackView {
+        external::RackView { identity: self.identity.into() }
+    }
+}
+
+// NOTE: This object is not currently stored in the database.
+//
+// However, it likely will be in the future. At the moment,
+// Nexus simply reports all the live connections it knows about.
+pub struct Sled {
+    pub identity: IdentityMetadata,
+    pub service_address: SocketAddr,
+}
+
+impl Into<external::SledView> for Sled {
+    fn into(self) -> external::SledView {
+        external::SledView {
+            identity: self.identity.into(),
+            service_address: self.service_address,
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct IdentityMetadata {
+    pub id: Uuid,
+    pub name: external::Name,
+    pub description: String,
+    pub time_created: DateTime<Utc>,
+    pub time_modified: DateTime<Utc>,
+}
+
+impl IdentityMetadata {
+    fn new(id: Uuid, params: external::IdentityMetadataCreateParams) -> Self {
+        let now = Utc::now();
+        Self {
+            id,
+            name: params.name,
+            description: params.description,
+            time_created: now,
+            time_modified: now,
+        }
+    }
+}
+
+impl Into<external::IdentityMetadata> for IdentityMetadata {
+    fn into(self) -> external::IdentityMetadata {
+        external::IdentityMetadata {
+            id: self.id,
+            name: self.name,
+            description: self.description,
+            time_created: self.time_created,
+            time_modified: self.time_modified,
+        }
+    }
+}
+
+impl From<external::IdentityMetadata> for IdentityMetadata {
+    fn from(metadata: external::IdentityMetadata) -> Self {
+        Self {
+            id: metadata.id,
+            name: metadata.name,
+            description: metadata.description,
+            time_created: metadata.time_created,
+            time_modified: metadata.time_modified,
+        }
+    }
+}
+
+/// Serialization to DB.
+impl SqlSerialize for IdentityMetadata {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        output.set("id", &self.id);
+        output.set("name", &self.name);
+        output.set("description", &self.description);
+        output.set("time_created", &self.time_created);
+        output.set("time_modified", &self.time_modified);
+
+        // TODO: Is this right? When should this be set?
+        output.set("time_deleted", &(None as Option<DateTime<Utc>>));
+    }
+}
+
+/// Deserialization from the DB.
+impl TryFrom<&tokio_postgres::Row> for IdentityMetadata {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let time_deleted: Option<DateTime<Utc>> =
+            sql_row_value(value, "time_deleted")?;
+
+        // We could support representing deleted objects, but we would want to
+        // think about how to do that. For example, we might want to use
+        // separate types so that the control plane can't accidentally do things
+        // like attach a disk to a deleted Instance. We haven't figured any of
+        // this out, and there's no need yet.
+        if time_deleted.is_some() {
+            return Err(external::Error::internal_error(
+                "model does not support objects that have been deleted",
+            ));
+        }
+        Ok(IdentityMetadata {
+            id: sql_row_value(value, "id")?,
+            name: sql_row_value(value, "name")?,
+            description: sql_row_value(value, "description")?,
+            time_created: sql_row_value(value, "time_created")?,
+            time_modified: sql_row_value(value, "time_modified")?,
+        })
+    }
+}
+
+/// Describes a project within the database.
+pub struct Project {
+    identity: IdentityMetadata,
+}
+
+impl Project {
+    /// Creates a new database Project object.
+    pub fn new(params: &external::ProjectCreateParams) -> Self {
+        let id = Uuid::new_v4();
+        Self { identity: IdentityMetadata::new(id, params.identity.clone()) }
+    }
+
+    pub fn name(&self) -> &str {
+        self.identity.name.as_str()
+    }
+
+    pub fn id(&self) -> &Uuid {
+        &self.identity.id
+    }
+}
+
+/// Conversion to the external API type.
+impl Into<external::ProjectView> for Project {
+    fn into(self) -> external::ProjectView {
+        external::ProjectView { identity: self.identity.into() }
+    }
+}
+
+/// Conversion from the external API type.
+impl From<external::ProjectView> for Project {
+    fn from(project: external::ProjectView) -> Self {
+        Self { identity: project.identity.into() }
+    }
+}
+
+/// Serialization to DB.
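+///
+/// A sketch of the intended call pattern, mirroring `DataStore::project_create`
+/// in this change (variable names here are illustrative only):
+///
+/// ```ignore
+/// let project = db::model::Project::new(&create_params);
+/// let mut values = SqlValueSet::new();
+/// project.sql_serialize(&mut values);
+/// // `values` now holds the identity columns ("id", "name", "description",
+/// // "time_created", "time_modified", "time_deleted") for an INSERT.
+/// ```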
+impl SqlSerialize for Project {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.identity.sql_serialize(output);
+    }
+}
+
+/// Deserialization from DB.
+impl TryFrom<&tokio_postgres::Row> for Project {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Project { identity: IdentityMetadata::try_from(value)? })
+    }
+}
+
+/// An Instance (VM).
+#[derive(Clone, Debug)]
+pub struct Instance {
+    /// common identifying metadata
+    pub identity: IdentityMetadata,
+
+    /// id for the project containing this Instance
+    pub project_id: Uuid,
+
+    /// state owned by the data plane
+    pub runtime: InstanceRuntimeState,
+    // TODO-completeness: add disks, network, tags, metrics
+    /// number of CPUs allocated for this Instance
+    pub ncpus: InstanceCpuCount,
+    /// memory allocated for this Instance
+    pub memory: ByteCount,
+    /// RFC1035-compliant hostname for the Instance.
+    // TODO-cleanup different type?
+    pub hostname: String,
+}
+
+impl Instance {
+    pub fn new(
+        instance_id: Uuid,
+        project_id: Uuid,
+        params: &external::InstanceCreateParams,
+        runtime: InstanceRuntimeState,
+    ) -> Self {
+        Self {
+            identity: IdentityMetadata::new(
+                instance_id,
+                params.identity.clone(),
+            ),
+            project_id,
+            ncpus: params.ncpus,
+            memory: params.memory,
+            hostname: params.hostname.clone(),
+            runtime,
+        }
+    }
+}
+
+/// Conversion to the external API type.
+impl Into<external::InstanceView> for Instance {
+    fn into(self) -> external::InstanceView {
+        external::InstanceView {
+            identity: self.identity.clone().into(),
+            project_id: self.project_id,
+            ncpus: self.ncpus,
+            memory: self.memory,
+            hostname: self.hostname.clone(),
+            runtime: self.runtime.into(),
+        }
+    }
+}
+
+/// Serialization to DB.
+impl SqlSerialize for Instance {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.identity.sql_serialize(output);
+        output.set("project_id", &self.project_id);
+        self.runtime.sql_serialize(output);
+        output.set("ncpus", &self.ncpus);
+        output.set("memory", &self.memory);
+        output.set("hostname", &self.hostname);
+    }
+}
+
+/// Deserialization from DB.
+impl TryFrom<&tokio_postgres::Row> for Instance {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Instance {
+            identity: IdentityMetadata::try_from(value)?,
+            project_id: sql_row_value(value, "project_id")?,
+            ncpus: sql_row_value(value, "ncpus")?,
+            memory: sql_row_value(value, "memory")?,
+            hostname: sql_row_value(value, "hostname")?,
+            runtime: InstanceRuntimeState::try_from(value)?,
+        })
+    }
+}
+
+/// Runtime state of the Instance, including the actual running state and minimal
+/// metadata
+///
+/// This state is owned by the sled agent running that Instance.
+#[derive(Clone, Debug)]
+pub struct InstanceRuntimeState {
+    /// runtime state of the Instance
+    pub run_state: InstanceState,
+    /// which sled is running this Instance
+    pub sled_uuid: Uuid,
+    /// generation number for this state
+    pub gen: Generation,
+    /// timestamp for this information
+    pub time_updated: DateTime<Utc>,
+}
+
+/// Conversion to the external API type.
+impl Into<external::InstanceRuntimeStateView> for InstanceRuntimeState {
+    fn into(self) -> external::InstanceRuntimeStateView {
+        external::InstanceRuntimeStateView {
+            run_state: self.run_state.0,
+            time_run_state_updated: self.time_updated,
+        }
+    }
+}
+
+/// Conversion from the internal API type.
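+///
+/// This is the direction used when new runtime state arrives as the internal
+/// API type (e.g. reported by a sled agent) and needs to become a model type
+/// before being written back. Roughly (illustrative only):
+///
+/// ```ignore
+/// let db_runtime: InstanceRuntimeState = reported_state.into();
+/// datastore.instance_update_runtime(&instance_id, &db_runtime).await?;
+/// ```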
+impl From<internal::nexus::InstanceRuntimeState> for InstanceRuntimeState {
+    fn from(state: internal::nexus::InstanceRuntimeState) -> Self {
+        Self {
+            run_state: InstanceState(state.run_state),
+            sled_uuid: state.sled_uuid,
+            gen: state.gen,
+            time_updated: state.time_updated,
+        }
+    }
+}
+
+/// Conversion to the internal API type.
+impl Into<internal::nexus::InstanceRuntimeState> for InstanceRuntimeState {
+    fn into(self) -> internal::nexus::InstanceRuntimeState {
+        internal::sled_agent::InstanceRuntimeState {
+            run_state: self.run_state.0,
+            sled_uuid: self.sled_uuid,
+            gen: self.gen,
+            time_updated: self.time_updated,
+        }
+    }
+}
+
+/// Serialization to the database.
+impl SqlSerialize for InstanceRuntimeState {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.run_state.sql_serialize(output);
+        output.set("active_server_id", &self.sled_uuid);
+        output.set("state_generation", &self.gen);
+        output.set("time_state_updated", &self.time_updated);
+    }
+}
+
+/// Deserialization from the database.
+impl TryFrom<&tokio_postgres::Row> for InstanceRuntimeState {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(InstanceRuntimeState {
+            run_state: InstanceState::try_from(value)?,
+            sled_uuid: sql_row_value(value, "active_server_id")?,
+            gen: sql_row_value(value, "state_generation")?,
+            time_updated: sql_row_value(value, "time_state_updated")?,
+        })
+    }
+}
+
+/// A wrapper around the external "InstanceState" object,
+/// which may be stored to disk.
+#[derive(Copy, Clone, Debug)]
+pub struct InstanceState(external::InstanceState);
+
+impl InstanceState {
+    pub fn new(state: external::InstanceState) -> Self {
+        Self(state)
+    }
+
+    pub fn state(&self) -> &external::InstanceState {
+        &self.0
+    }
+}
+
+/// Serialization to the database.
+impl SqlSerialize for InstanceState {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        output.set("instance_state", &self.0.label());
+    }
+}
+
+/// Deserialization from the database.
+impl TryFrom<&tokio_postgres::Row> for InstanceState {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let variant: &str = sql_row_value(value, "instance_state")?;
+        Ok(InstanceState(
+            external::InstanceState::try_from(variant)
+                .map_err(|err| Error::InternalError { message: err })?,
+        ))
+    }
+}
+
+/// A Disk (network block device).
+#[derive(Clone, Debug)]
+pub struct Disk {
+    /// common identifying metadata.
+    pub identity: IdentityMetadata,
+    /// id for the project containing this Disk
+    pub project_id: Uuid,
+    /// id for the snapshot from which this Disk was created (None means a blank
+    /// disk)
+    pub create_snapshot_id: Option<Uuid>,
+    /// size of the Disk
+    pub size: ByteCount,
+    /// runtime state of the Disk
+    pub runtime: DiskRuntimeState,
+}
+
+impl Disk {
+    pub fn new(
+        disk_id: Uuid,
+        project_id: Uuid,
+        params: external::DiskCreateParams,
+        runtime_initial: DiskRuntimeState,
+    ) -> Self {
+        Self {
+            identity: IdentityMetadata::new(disk_id, params.identity),
+            project_id,
+            create_snapshot_id: params.snapshot_id,
+            size: params.size,
+            runtime: runtime_initial,
+        }
+    }
+}
+
+/// Conversion to the external API type.
+impl Into<external::DiskView> for Disk {
+    fn into(self) -> external::DiskView {
+        let device_path = format!("/mnt/{}", self.identity.name.as_str());
+        external::DiskView {
+            identity: self.identity.clone().into(),
+            project_id: self.project_id,
+            snapshot_id: self.create_snapshot_id,
+            size: self.size,
+            state: self.runtime.disk_state.into(),
+            device_path,
+        }
+    }
+}
+
+/// Serialization to the DB.
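+///
+/// Note that two column names differ from the field names: `size` is stored
+/// as "size_bytes" and `create_snapshot_id` as "origin_snapshot" (matching
+/// the `TryFrom` impl below).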
+impl SqlSerialize for Disk {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.identity.sql_serialize(output);
+        output.set("project_id", &self.project_id);
+        self.runtime.sql_serialize(output);
+        output.set("size_bytes", &self.size);
+        output.set("origin_snapshot", &self.create_snapshot_id);
+    }
+}
+
+/// Deserialization from the DB.
+impl TryFrom<&tokio_postgres::Row> for Disk {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Disk {
+            identity: IdentityMetadata::try_from(value)?,
+            project_id: sql_row_value(value, "project_id")?,
+            create_snapshot_id: sql_row_value(value, "origin_snapshot")?,
+            size: sql_row_value(value, "size_bytes")?,
+            runtime: DiskRuntimeState::try_from(value)?,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct DiskRuntimeState {
+    /// runtime state of the Disk
+    pub disk_state: DiskState,
+    /// generation number for this state
+    pub gen: Generation,
+    /// timestamp for this information
+    pub time_updated: DateTime<Utc>,
+}
+
+/// Conversion from the internal API type.
+impl From<internal::nexus::DiskRuntimeState> for DiskRuntimeState {
+    fn from(runtime: internal::nexus::DiskRuntimeState) -> Self {
+        Self {
+            disk_state: runtime.disk_state.into(),
+            gen: runtime.gen,
+            time_updated: runtime.time_updated,
+        }
+    }
+}
+
+/// Conversion to the internal API type.
+impl Into<internal::nexus::DiskRuntimeState> for DiskRuntimeState {
+    fn into(self) -> internal::nexus::DiskRuntimeState {
+        internal::nexus::DiskRuntimeState {
+            disk_state: self.disk_state.into(),
+            gen: self.gen,
+            time_updated: self.time_updated,
+        }
+    }
+}
+
+/// Serialization to the DB.
+impl SqlSerialize for DiskRuntimeState {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.disk_state.sql_serialize(output);
+        output.set("state_generation", &self.gen);
+        output.set("time_state_updated", &self.time_updated);
+    }
+}
+
+/// Deserialization from the DB.
+impl TryFrom<&tokio_postgres::Row> for DiskRuntimeState {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(DiskRuntimeState {
+            disk_state: DiskState::try_from(value)?,
+            gen: sql_row_value(value, "state_generation")?,
+            time_updated: sql_row_value(value, "time_state_updated")?,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct DiskState(external::DiskState);
+
+impl DiskState {
+    pub fn new(state: external::DiskState) -> Self {
+        Self(state)
+    }
+
+    pub fn state(&self) -> &external::DiskState {
+        &self.0
+    }
+
+    pub fn is_attached(&self) -> bool {
+        self.0.is_attached()
+    }
+
+    pub fn attached_instance_id(&self) -> Option<&Uuid> {
+        self.0.attached_instance_id()
+    }
+}
+
+/// Conversion from the external API type.
+impl From<external::DiskState> for DiskState {
+    fn from(state: external::DiskState) -> Self {
+        Self(state)
+    }
+}
+/// Conversion to the external API type.
+impl Into<external::DiskState> for DiskState {
+    fn into(self) -> external::DiskState {
+        self.0
+    }
+}
+
+/// Serialization to the DB.
+impl SqlSerialize for DiskState {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        let attach_id = &self.0.attached_instance_id().map(|id| *id);
+        output.set("attach_instance_id", attach_id);
+        output.set("disk_state", &self.0.label());
+    }
+}
+
+/// Deserialization from the DB.
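+///
+/// The state is stored across two columns: the "disk_state" label plus an
+/// optional "attach_instance_id". The pair is handed to
+/// `external::DiskState::try_from`, which is expected to reject combinations
+/// that make no sense (e.g. an attached state without an instance id).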
+impl TryFrom<&tokio_postgres::Row> for DiskState {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let disk_state_str: &str = sql_row_value(value, "disk_state")?;
+        let instance_uuid: Option<Uuid> =
+            sql_row_value(value, "attach_instance_id")?;
+        Ok(DiskState(
+            external::DiskState::try_from((disk_state_str, instance_uuid))
+                .map_err(|e| Error::internal_error(&e))?,
+        ))
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct DiskAttachment {
+    pub instance_id: Uuid,
+    pub disk_id: Uuid,
+    pub disk_name: external::Name,
+    pub disk_state: DiskState,
+}
+
+impl Into<external::DiskAttachment> for DiskAttachment {
+    fn into(self) -> external::DiskAttachment {
+        external::DiskAttachment {
+            instance_id: self.instance_id,
+            disk_id: self.disk_id,
+            disk_name: self.disk_name,
+            disk_state: self.disk_state.into(),
+        }
+    }
+}
+
+impl TryFrom<&tokio_postgres::Row> for DiskAttachment {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(DiskAttachment {
+            instance_id: sql_row_value(value, "attach_instance_id")?,
+            disk_id: sql_row_value(value, "id")?,
+            disk_name: sql_row_value(value, "name")?,
+            disk_state: DiskState::try_from(value)?,
+        })
+    }
+}
+
+/// Information announced by a metric server, used so that clients can contact it and collect
+/// available metric data from it.
+#[derive(Debug, Clone)]
+pub struct ProducerEndpoint {
+    pub id: Uuid,
+    pub time_created: DateTime<Utc>,
+    pub time_modified: DateTime<Utc>,
+    pub address: SocketAddr,
+    pub base_route: String,
+    pub interval: Duration,
+}
+
+impl ProducerEndpoint {
+    pub fn new(endpoint: &internal::nexus::ProducerEndpoint) -> Self {
+        let now = Utc::now();
+        Self {
+            id: endpoint.id,
+            time_created: now,
+            time_modified: now,
+            address: endpoint.address,
+            base_route: endpoint.base_route.clone(),
+            interval: endpoint.interval,
+        }
+    }
+
+    /// Return the route that can be used to request metric data.
+    pub fn collection_route(&self) -> String {
+        format!("{}/{}", &self.base_route, &self.id)
+    }
+}
+
+impl SqlSerialize for ProducerEndpoint {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        output.set("id", &self.id);
+        output.set("time_created", &self.time_created);
+        output.set("time_modified", &self.time_modified);
+        output.set("ip", &self.address.ip());
+        output.set("port", &i32::from(self.address.port()));
+        output.set("interval", &self.interval.as_secs_f64());
+        output.set("route", &self.base_route);
+    }
+}
+
+impl TryFrom<&tokio_postgres::Row> for ProducerEndpoint {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let id: Uuid = sql_row_value(value, "id")?;
+        let time_created: DateTime<Utc> =
+            sql_row_value(value, "time_created")?;
+        let time_modified: DateTime<Utc> =
+            sql_row_value(value, "time_modified")?;
+        let ip: IpAddr = sql_row_value(value, "ip")?;
+        let port: i32 = sql_row_value(value, "port")?;
+        let address = SocketAddr::new(ip, port as _);
+        let base_route: String = sql_row_value(value, "route")?;
+        let interval =
+            Duration::from_secs_f64(sql_row_value(value, "interval")?);
+        Ok(Self {
+            id,
+            time_created,
+            time_modified,
+            address,
+            base_route,
+            interval,
+        })
+    }
+}
+
+/// Message used to notify Nexus that this oximeter instance is up and running.
+#[derive(Debug, Clone, Copy)]
+pub struct OximeterInfo {
+    /// The ID for this oximeter instance.
+    pub collector_id: Uuid,
+    /// When this resource was created.
+    pub time_created: DateTime<Utc>,
+    /// When this resource was last modified.
+    pub time_modified: DateTime<Utc>,
+    /// The address on which this oximeter instance listens for requests
+    pub address: SocketAddr,
+}
+
+impl OximeterInfo {
+    pub fn new(info: &internal::nexus::OximeterInfo) -> Self {
+        let now = Utc::now();
+        Self {
+            collector_id: info.collector_id,
+            time_created: now,
+            time_modified: now,
+            address: info.address,
+        }
+    }
+}
+
+impl SqlSerialize for OximeterInfo {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        output.set("id", &self.collector_id);
+        output.set("time_created", &self.time_created);
+        output.set("time_modified", &self.time_modified);
+        output.set("ip", &self.address.ip());
+        output.set("port", &i32::from(self.address.port()));
+    }
+}
+
+impl TryFrom<&tokio_postgres::Row> for OximeterInfo {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let collector_id: Uuid = sql_row_value(value, "id")?;
+        let ip: IpAddr = sql_row_value(value, "ip")?;
+        let port: i32 = sql_row_value(value, "port")?;
+        let time_created: DateTime<Utc> =
+            sql_row_value(value, "time_created")?;
+        let time_modified: DateTime<Utc> =
+            sql_row_value(value, "time_modified")?;
+        let address = SocketAddr::new(ip, port as _);
+        Ok(Self { collector_id, time_created, time_modified, address })
+    }
+}
+
+/// An assignment of an Oximeter instance to a metric producer for collection.
+#[derive(Debug, Clone, Copy)]
+pub struct OximeterAssignment {
+    pub oximeter_id: Uuid,
+    pub producer_id: Uuid,
+}
+
+impl SqlSerialize for OximeterAssignment {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        output.set("oximeter_id", &self.oximeter_id);
+        output.set("producer_id", &self.producer_id);
+    }
+}
+
+impl TryFrom<&tokio_postgres::Row> for OximeterAssignment {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        let oximeter_id: Uuid = sql_row_value(value, "oximeter_id")?;
+        let producer_id: Uuid = sql_row_value(value, "producer_id")?;
+        Ok(Self { oximeter_id, producer_id })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct Vpc {
+    pub identity: IdentityMetadata,
+    pub project_id: Uuid,
+    pub dns_name: external::Name,
+}
+
+impl Vpc {
+    pub fn new(
+        vpc_id: Uuid,
+        project_id: Uuid,
+        params: external::VpcCreateParams,
+    ) -> Self {
+        Self {
+            identity: IdentityMetadata::new(vpc_id, params.identity),
+            project_id,
+            dns_name: params.dns_name,
+        }
+    }
+}
+
+impl Into<external::Vpc> for Vpc {
+    fn into(self) -> external::Vpc {
+        external::Vpc {
+            identity: self.identity.into(),
+            project_id: self.project_id,
+            dns_name: self.dns_name,
+            // VPC subnets are accessed through a separate row lookup.
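+            // Callers that need the subnets are expected to query the
+            // "VpcSubnet" table (see `VpcSubnet::try_from` below) and fill
+            // them in themselves.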
+            vpc_subnets: vec![],
+        }
+    }
+}
+
+impl TryFrom<&tokio_postgres::Row> for Vpc {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Self {
+            identity: IdentityMetadata::try_from(value)?,
+            project_id: sql_row_value(value, "project_id")?,
+            dns_name: sql_row_value(value, "dns_name")?,
+        })
+    }
+}
+
+impl SqlSerialize for Vpc {
+    fn sql_serialize(&self, output: &mut SqlValueSet) {
+        self.identity.sql_serialize(output);
+        output.set("project_id", &self.project_id);
+        output.set("dns_name", &self.dns_name);
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct VpcSubnet {
+    pub identity: IdentityMetadata,
+    pub vpc_id: Uuid,
+    pub ipv4_block: Option<external::Ipv4Net>,
+    pub ipv6_block: Option<external::Ipv6Net>,
+}
+
+impl TryFrom<&tokio_postgres::Row> for VpcSubnet {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Self {
+            identity: IdentityMetadata::try_from(value)?,
+            vpc_id: sql_row_value(value, "vpc_id")?,
+            ipv4_block: sql_row_value(value, "ipv4_block")?,
+            ipv6_block: sql_row_value(value, "ipv6_block")?,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct NetworkInterface {
+    pub identity: IdentityMetadata,
+    pub vpc_id: Uuid,
+    pub subnet_id: Uuid,
+    pub mac: external::MacAddr,
+    pub ip: IpAddr,
+}
+
+impl TryFrom<&tokio_postgres::Row> for NetworkInterface {
+    type Error = Error;
+
+    fn try_from(value: &tokio_postgres::Row) -> Result<Self, Self::Error> {
+        Ok(Self {
+            identity: IdentityMetadata::try_from(value)?,
+            vpc_id: sql_row_value(value, "vpc_id")?,
+            subnet_id: sql_row_value(value, "subnet_id")?,
+            mac: sql_row_value(value, "mac")?,
+            ip: sql_row_value(value, "ip")?,
+        })
+    }
+}
diff --git a/omicron-nexus/src/db/schema.rs b/omicron-nexus/src/db/schema.rs
index 82297dff66..fae29e3b9b 100644
--- a/omicron-nexus/src/db/schema.rs
+++ b/omicron-nexus/src/db/schema.rs
@@ -5,7 +5,6 @@
  * ./sql.rs.
  */
 
-use omicron_common::api;
 use omicron_common::api::external::Error;
 use omicron_common::api::external::Name;
 use omicron_common::api::external::ResourceType;
@@ -16,10 +15,14 @@ use super::sql::ResourceTable;
 use super::sql::Table;
 use crate::db;
 
+// TODO: Now that db/model.rs is a thing, we could actually impl Table
+// for all those real structs? Might help to reduce things a bit; the
+// schema does use them as ModelTypes anyway.
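+//
+// Hypothetically, that would collapse each pair below into one item, along
+// the lines of:
+//
+//     impl Table for db::model::Project {
+//         const TABLE_NAME: &'static str = "Project";
+//         ...
+//     }
+//
+// (sketch only; the separate marker structs would then go away).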
+
 /** Describes the "Project" table */
 pub struct Project;
 impl Table for Project {
-    type ModelType = api::internal::nexus::Project;
+    type ModelType = db::model::Project;
     const TABLE_NAME: &'static str = "Project";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -38,7 +41,7 @@ impl ResourceTable for Project {
 /** Describes the "Instance" table */
 pub struct Instance;
 impl Table for Instance {
-    type ModelType = api::internal::nexus::Instance;
+    type ModelType = db::model::Instance;
     const TABLE_NAME: &'static str = "Instance";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -65,7 +68,7 @@ impl ResourceTable for Instance {
 /** Describes the "Disk" table */
 pub struct Disk;
 impl Table for Disk {
-    type ModelType = api::internal::nexus::Disk;
+    type ModelType = db::model::Disk;
     const TABLE_NAME: &'static str = "Disk";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -120,7 +123,7 @@ impl Table for SagaNodeEvent {
 /** Describes the "Oximeter" table */
 pub struct Oximeter;
 impl Table for Oximeter {
-    type ModelType = api::internal::nexus::OximeterInfo;
+    type ModelType = db::model::OximeterInfo;
     const TABLE_NAME: &'static str = "Oximeter";
     const ALL_COLUMNS: &'static [&'static str] =
         &["id", "time_created", "time_modified", "ip", "port"];
@@ -129,7 +132,7 @@
 /** Describes the "MetricProducer" table */
 pub struct MetricProducer;
 impl Table for MetricProducer {
-    type ModelType = api::internal::nexus::ProducerEndpoint;
+    type ModelType = db::model::ProducerEndpoint;
     const TABLE_NAME: &'static str = "MetricProducer";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -145,7 +148,7 @@ impl Table for MetricProducer {
 /** Describes the "OximeterAssignment" table */
 pub struct OximeterAssignment;
 impl Table for OximeterAssignment {
-    type ModelType = api::internal::nexus::OximeterAssignment;
+    type ModelType = db::model::OximeterAssignment;
     const TABLE_NAME: &'static str = "OximeterAssignment";
     const ALL_COLUMNS: &'static [&'static str] =
         &["oximeter_id", "producer_id", "time_created"];
@@ -154,7 +157,7 @@
 /** Describes the "Vpc" table */
 pub struct Vpc;
 impl Table for Vpc {
-    type ModelType = api::external::Vpc;
+    type ModelType = db::model::Vpc;
     const TABLE_NAME: &'static str = "Vpc";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -175,7 +178,7 @@ impl ResourceTable for Vpc {
 /** Describes the "VpcSubnet" table */
 pub struct VpcSubnet;
 impl Table for VpcSubnet {
-    type ModelType = api::external::VpcSubnet;
+    type ModelType = db::model::VpcSubnet;
     const TABLE_NAME: &'static str = "VpcSubnet";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
@@ -193,7 +196,7 @@ impl Table for VpcSubnet {
 /** Describes the "NetworkInterface" table */
 pub struct NetworkInterface;
 impl Table for NetworkInterface {
-    type ModelType = api::external::NetworkInterface;
+    type ModelType = db::model::NetworkInterface;
     const TABLE_NAME: &'static str = "NetworkInterface";
     const ALL_COLUMNS: &'static [&'static str] = &[
         "id",
diff --git a/omicron-nexus/src/http_entrypoints_external.rs b/omicron-nexus/src/http_entrypoints_external.rs
index 55f12a2d9d..0cfef7da6d 100644
--- a/omicron-nexus/src/http_entrypoints_external.rs
+++ b/omicron-nexus/src/http_entrypoints_external.rs
@@ -3,6 +3,7 @@
  */
 
 use super::ServerContext;
+use crate::db;
 
 use dropshot::endpoint;
 use dropshot::ApiDescription;
@@ -16,7 +17,6 @@
 use dropshot::Query;
 use dropshot::RequestContext;
 use dropshot::ResultsPage;
 use dropshot::TypedBody;
-use omicron_common::api;
 use omicron_common::api::external::http_pagination::data_page_params_for;
 use omicron_common::api::external::http_pagination::data_page_params_nameid_id;
 use omicron_common::api::external::http_pagination::data_page_params_nameid_name;
@@ -176,8 +176,7 @@ async fn projects_get(
     };
 
     let view_list =
-        to_list::<api::internal::nexus::Project, ProjectView>(project_stream)
-            .await;
+        to_list::<db::model::Project, ProjectView>(project_stream).await;
 
     Ok(HttpResponseOk(ScanByNameOrId::results_page(&query, view_list)?))
 }
@@ -301,8 +300,7 @@ async fn project_disks_get(
         )
         .await?;
 
-    let disk_list =
-        to_list::<api::internal::nexus::Disk, DiskView>(disk_stream).await;
+    let disk_list = to_list::<db::model::Disk, DiskView>(disk_stream).await;
 
     Ok(HttpResponseOk(ScanByName::results_page(&query, disk_list)?))
 }
@@ -406,10 +404,8 @@ async fn project_instances_get(
             &data_page_params_for(&rqctx, &query)?,
         )
         .await?;
-    let view_list = to_list::<api::internal::nexus::Instance, InstanceView>(
-        instance_stream,
-    )
-    .await;
+    let view_list =
+        to_list::<db::model::Instance, InstanceView>(instance_stream).await;
     Ok(HttpResponseOk(ScanByName::results_page(&query, view_list)?))
 }
@@ -580,7 +576,8 @@ async fn instance_disks_get(
     let disk_list = nexus
         .instance_list_disks(&project_name, &instance_name, &fake_query)
         .await?;
-    let view_list = to_list(disk_list).await;
+    let view_list =
+        to_list::(disk_list).await;
     Ok(HttpResponseOk(view_list))
 }
@@ -809,8 +806,7 @@ async fn hardware_racks_get(
     let query = query_params.into_inner();
     let rack_stream =
         nexus.racks_list(&data_page_params_for(&rqctx, &query)?).await?;
-    let view_list =
-        to_list::<api::internal::nexus::Rack, RackView>(rack_stream).await;
+    let view_list = to_list::<db::model::Rack, RackView>(rack_stream).await;
     Ok(HttpResponseOk(ScanById::results_page(&query, view_list)?))
 }
@@ -861,8 +857,7 @@ async fn hardware_sleds_get(
     let query = query_params.into_inner();
     let sled_stream =
         nexus.sleds_list(&data_page_params_for(&rqctx, &query)?).await?;
-    let view_list =
-        to_list::<api::internal::nexus::Sled, SledView>(sled_stream).await;
+    let view_list = to_list::<db::model::Sled, SledView>(sled_stream).await;
     Ok(HttpResponseOk(ScanById::results_page(&query, view_list)?))
 }
diff --git a/omicron-nexus/src/nexus.rs b/omicron-nexus/src/nexus.rs
index 7406eddf4b..4c327004da 100644
--- a/omicron-nexus/src/nexus.rs
+++ b/omicron-nexus/src/nexus.rs
@@ -34,15 +34,10 @@
 use omicron_common::api::external::UpdateResult;
 use omicron_common::api::external::Vpc;
 use omicron_common::api::external::VpcCreateParams;
 use omicron_common::api::external::VpcUpdateParams;
-use omicron_common::api::internal::nexus::Disk;
+use omicron_common::api::internal::nexus;
 use omicron_common::api::internal::nexus::DiskRuntimeState;
-use omicron_common::api::internal::nexus::Instance;
-use omicron_common::api::internal::nexus::InstanceRuntimeState;
 use omicron_common::api::internal::nexus::OximeterInfo;
 use omicron_common::api::internal::nexus::ProducerEndpoint;
-use omicron_common::api::internal::nexus::Project;
-use omicron_common::api::internal::nexus::Rack;
-use omicron_common::api::internal::nexus::Sled;
 use omicron_common::api::internal::sled_agent::DiskStateRequested;
 use omicron_common::api::internal::sled_agent::InstanceRuntimeStateRequested;
 use omicron_common::api::internal::sled_agent::InstanceStateRequested;
@@ -60,6 +55,9 @@
 use steno::SagaTemplate;
 use steno::SagaType;
 use uuid::Uuid;
+// TODO: When referring to API types, we should try to include
+// the prefix unless it is unambiguous.
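
For the `to_list::<db::model::Project, ProjectView>(...)` calls above to satisfy `to_list`'s `Into` bound, each db model type needs a conversion into its external view. A minimal sketch of the expected shape (hypothetical; the real impls would live alongside the model types in db/model.rs, which this excerpt shows only in part):

    use omicron_common::api::external::ProjectView;

    // Hypothetical sketch: satisfies the T: Into<U> bound used by to_list.
    impl From<db::model::Project> for ProjectView {
        fn from(project: db::model::Project) -> Self {
            // Assumes the model carries an IdentityMetadata that converts
            // into the external form.
            ProjectView { identity: project.identity.into() }
        }
    }
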
+
 /**
  * Exposes additional [`Nexus`] interfaces for use by the test suite
  */
@@ -208,7 +206,8 @@ impl Nexus {
         oximeter_info: &OximeterInfo,
     ) -> Result<(), Error> {
         // Insert into the DB
-        self.db_datastore.oximeter_create(oximeter_info).await?;
+        let db_info = db::model::OximeterInfo::new(&oximeter_info);
+        self.db_datastore.oximeter_create(&db_info).await?;
 
         let id = oximeter_info.collector_id;
         let client_log =
@@ -297,26 +296,23 @@ impl Nexus {
     pub async fn project_create(
         &self,
         new_project: &ProjectCreateParams,
-    ) -> CreateResult<Project> {
+    ) -> CreateResult<db::model::Project> {
+        // Create a project.
+        let db_project = db::model::Project::new(new_project);
+        let project: db::model::Project =
+            self.db_datastore.project_create(&db_project).await?;
         // TODO: We probably want to have "project creation" and "default VPC
         // creation" co-located within a saga for atomicity.
         //
         // Until then, we just perform the operations sequentially.
 
-        // Create a project.
-        let project_id = Uuid::new_v4();
-        let project = self
-            .db_datastore
-            .project_create_with_id(&project_id, new_project)
-            .await?;
-
         // Create a default VPC associated with the project.
-        let vpc_id = Uuid::new_v4();
+        let id = Uuid::new_v4();
         let _ = self
             .db_datastore
             .project_create_vpc(
-                &vpc_id,
-                &project_id,
+                &id,
+                project.id(),
                 &VpcCreateParams {
                     identity: IdentityMetadataCreateParams {
                         name: Name::try_from("default").unwrap(),
@@ -332,21 +328,24 @@
         Ok(project)
     }
 
-    pub async fn project_fetch(&self, name: &Name) -> LookupResult<Project> {
-        Ok(self.db_datastore.project_fetch(name).await?)
+    pub async fn project_fetch(
+        &self,
+        name: &Name,
+    ) -> LookupResult<db::model::Project> {
+        self.db_datastore.project_fetch(name).await
     }
 
     pub async fn projects_list_by_name(
         &self,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<Project> {
+    ) -> ListResult<db::model::Project> {
         self.db_datastore.projects_list_by_name(pagparams).await
     }
 
     pub async fn projects_list_by_id(
         &self,
         pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResult<Project> {
+    ) -> ListResult<db::model::Project> {
         self.db_datastore.projects_list_by_id(pagparams).await
     }
 
@@ -358,8 +357,8 @@
         &self,
         name: &Name,
         new_params: &ProjectUpdateParams,
-    ) -> UpdateResult<Project> {
-        Ok(self.db_datastore.project_update(name, new_params).await?)
+    ) -> UpdateResult<db::model::Project> {
+        self.db_datastore.project_update(name, new_params).await
     }
 
     /*
@@ -370,7 +369,7 @@
         &self,
         project_name: &Name,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<Disk> {
+    ) -> ListResult<db::model::Disk> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
         self.db_datastore.project_list_disks(&project_id, pagparams).await
@@ -380,7 +379,7 @@
         &self,
         project_name: &Name,
         params: &DiskCreateParams,
-    ) -> CreateResult<Disk> {
+    ) -> CreateResult<db::model::Disk> {
         let project = self.project_fetch(project_name).await?;
 
         /*
@@ -399,10 +398,10 @@
             .db_datastore
             .project_create_disk(
                 &disk_id,
-                &project.identity.id,
+                &project.id(),
                 params,
-                &DiskRuntimeState {
-                    disk_state: DiskState::Creating,
+                &db::model::DiskRuntimeState {
+                    disk_state: db::model::DiskState::new(DiskState::Creating),
                     gen: Generation::new(),
                     time_updated: Utc::now(),
                 },
@@ -423,8 +422,8 @@
         self.db_datastore
             .disk_update_runtime(
                 &disk_id,
-                &DiskRuntimeState {
-                    disk_state: DiskState::Detached,
+                &db::model::DiskRuntimeState {
+                    disk_state: db::model::DiskState::new(DiskState::Detached),
                     gen: disk_created.runtime.gen.next(),
                     time_updated: Utc::now(),
                 },
@@ -438,7 +437,7 @@
         &self,
         project_name: &Name,
         disk_name: &Name,
-    ) -> LookupResult<Disk> {
+    ) -> LookupResult<db::model::Disk> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
         self.db_datastore.disk_fetch_by_name(&project_id, disk_name).await
@@ -450,7 +449,7 @@
         disk_name: &Name,
     ) -> DeleteResult {
         let disk = self.project_lookup_disk(project_name, disk_name).await?;
-        bail_unless!(disk.runtime.disk_state != DiskState::Destroyed);
+        bail_unless!(disk.runtime.disk_state.state() != &DiskState::Destroyed);
 
         if disk.runtime.disk_state.is_attached() {
             return Err(Error::InvalidRequest {
@@ -504,7 +503,7 @@
         &self,
         project_name: &Name,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult<Instance> {
+    ) -> ListResult<db::model::Instance> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
         self.db_datastore.project_list_instances(&project_id, pagparams).await
@@ -514,7 +513,7 @@
         self: &Arc<Self>,
         project_name: &Name,
         params: &InstanceCreateParams,
-    ) -> CreateResult<Instance> {
+    ) -> CreateResult<db::model::Instance> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
 
@@ -608,7 +607,7 @@
         &self,
         project_name: &Name,
         instance_name: &Name,
-    ) -> LookupResult<Instance> {
+    ) -> LookupResult<db::model::Instance> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
         self.db_datastore
@@ -618,7 +617,7 @@
 
     fn check_runtime_change_allowed(
         &self,
-        runtime: &InstanceRuntimeState,
+        runtime: &nexus::InstanceRuntimeState,
     ) -> Result<(), Error> {
         /*
          * Users are allowed to request a start or stop even if the instance is
@@ -669,7 +668,7 @@
      */
     async fn instance_sled(
         &self,
-        instance: &Instance,
+        instance: &db::model::Instance,
     ) -> Result<Arc<SledAgentClient>, Error> {
         let said = &instance.runtime.sled_uuid;
         self.sled_client(&said).await
@@ -682,7 +681,7 @@
         &self,
         project_name: &Name,
         instance_name: &Name,
-    ) -> UpdateResult<Instance> {
+    ) -> UpdateResult<db::model::Instance> {
         /*
          * To implement reboot, we issue a call to the sled agent to set a
          * runtime state of "reboot".  We cannot simply stop the Instance and
@@ -699,7 +698,7 @@
         let instance =
             self.project_lookup_instance(project_name, instance_name).await?;
 
-        self.check_runtime_change_allowed(&instance.runtime)?;
+        self.check_runtime_change_allowed(&instance.runtime.clone().into())?;
         self.instance_set_runtime(
             &instance,
             self.instance_sled(&instance).await?,
@@ -718,11 +717,11 @@
         &self,
         project_name: &Name,
         instance_name: &Name,
-    ) -> UpdateResult<Instance> {
+    ) -> UpdateResult<db::model::Instance> {
         let instance =
             self.project_lookup_instance(project_name, instance_name).await?;
 
-        self.check_runtime_change_allowed(&instance.runtime)?;
+        self.check_runtime_change_allowed(&instance.runtime.clone().into())?;
         self.instance_set_runtime(
             &instance,
             self.instance_sled(&instance).await?,
@@ -741,11 +740,11 @@
         &self,
         project_name: &Name,
         instance_name: &Name,
-    ) -> UpdateResult<Instance> {
+    ) -> UpdateResult<db::model::Instance> {
         let instance =
             self.project_lookup_instance(project_name, instance_name).await?;
 
-        self.check_runtime_change_allowed(&instance.runtime)?;
+        self.check_runtime_change_allowed(&instance.runtime.clone().into())?;
         self.instance_set_runtime(
             &instance,
             self.instance_sled(&instance).await?,
@@ -763,7 +762,7 @@
      */
     async fn instance_set_runtime(
         &self,
-        instance: &Instance,
+        instance: &db::model::Instance,
         sa: Arc<SledAgentClient>,
         requested: InstanceRuntimeStateRequested,
     ) -> Result<(), Error> {
@@ -776,13 +775,13 @@
         let new_runtime = sa
             .instance_ensure(
                 instance.identity.id,
-                instance.runtime.clone(),
+                instance.runtime.clone().into(),
                 requested,
             )
             .await?;
 
         self.db_datastore
-            .instance_update_runtime(&instance.identity.id, &new_runtime)
+            .instance_update_runtime(&instance.identity.id, &new_runtime.into())
             .await
             .map(|_| ())
     }
@@ -795,7 +794,7 @@
         project_name: &Name,
         instance_name: &Name,
         pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResult {
+    ) -> ListResult {
         let instance =
             self.project_lookup_instance(project_name, instance_name).await?;
         self.db_datastore
@@ -823,7 +822,7 @@
                     instance_id: instance.identity.id,
                     disk_name: disk.identity.name.clone(),
                     disk_id: disk.identity.id,
-                    disk_state: disk.runtime.disk_state.clone(),
+                    disk_state: disk.runtime.disk_state.clone().into(),
                 });
             }
         }
@@ -853,8 +852,8 @@
         let instance_id = &instance.identity.id;
 
         fn disk_attachment_for(
-            instance: &Instance,
-            disk: &Disk,
+            instance: &db::model::Instance,
+            disk: &db::model::Disk,
         ) -> CreateResult<DiskAttachment> {
             let instance_id = &instance.identity.id;
             assert_eq!(
@@ -865,12 +864,14 @@
                 instance_id: *instance_id,
                 disk_id: disk.identity.id,
                 disk_name: disk.identity.name.clone(),
-                disk_state: disk.runtime.disk_state.clone(),
+                disk_state: disk.runtime.disk_state.clone().into(),
             })
         }
 
-        fn disk_attachment_error(disk: &Disk) -> CreateResult<DiskAttachment> {
-            let disk_status = match disk.runtime.disk_state {
+        fn disk_attachment_error(
+            disk: &db::model::Disk,
+        ) -> CreateResult<DiskAttachment> {
+            let disk_status = match disk.runtime.disk_state.clone().into() {
                 DiskState::Destroyed => "disk is destroyed",
                 DiskState::Faulted => "disk is faulted",
                 DiskState::Creating => "disk is detached",
@@ -900,7 +901,7 @@
             Err(Error::InvalidRequest { message })
         }
 
-        match &disk.runtime.disk_state {
+        match &disk.runtime.disk_state.clone().into() {
             /*
              * If we're already attaching or attached to the requested instance,
             * there's nothing else to do.
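
The call sites above pin down the shape of the db-side disk state wrapper: `db::model::DiskState::new(...)` to construct it, `.state()` to borrow the API value, `.is_attached()` forwarded through, and `.clone().into()` to convert back out. A hypothetical reconstruction consistent with those calls (the real definition lives in db/model.rs, which this excerpt does not show):

    use omicron_common::api::external;

    // Hypothetical newtype over the external DiskState, as implied by the
    // call sites in this diff.
    #[derive(Clone, Debug)]
    pub struct DiskState(external::DiskState);

    impl DiskState {
        pub fn new(state: external::DiskState) -> Self {
            DiskState(state)
        }

        /// Borrow the API-level state (used by the bail_unless! check).
        pub fn state(&self) -> &external::DiskState {
            &self.0
        }

        /// Forwarded so existing call sites like is_attached() keep working.
        pub fn is_attached(&self) -> bool {
            self.0.is_attached()
        }
    }

    impl From<DiskState> for external::DiskState {
        fn from(state: DiskState) -> Self {
            state.0
        }
    }
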
@@ -966,7 +967,7 @@
         let disk = self.project_lookup_disk(project_name, disk_name).await?;
         let instance_id = &instance.identity.id;
 
-        match &disk.runtime.disk_state {
+        match &disk.runtime.disk_state.clone().into() {
             /*
              * This operation is a noop if the disk is not attached or already
              * detaching from the same instance.
@@ -1017,7 +1018,7 @@
      */
     async fn disk_set_runtime(
         &self,
-        disk: &Disk,
+        disk: &db::model::Disk,
         sa: Arc<SledAgentClient>,
         requested: DiskStateRequested,
     ) -> Result<(), Error> {
@@ -1026,10 +1027,14 @@
          * reflect the new intermediate state.
          */
         let new_runtime = sa
-            .disk_ensure(disk.identity.id, disk.runtime.clone(), requested)
+            .disk_ensure(
+                disk.identity.id,
+                disk.runtime.clone().into(),
+                requested,
+            )
             .await?;
 
         self.db_datastore
-            .disk_update_runtime(&disk.identity.id, &new_runtime)
+            .disk_update_runtime(&disk.identity.id, &new_runtime.into())
             .await
             .map(|_| ())
     }
@@ -1041,7 +1046,12 @@
     ) -> ListResult<Vpc> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
-        self.db_datastore.project_list_vpcs(&project_id, pagparams).await
+        let db_stream =
+            self.db_datastore.project_list_vpcs(&project_id, pagparams).await?;
+        let api_stream = Box::pin(
+            db_stream.map(|result| result.map(|db_vpc| db_vpc.into())),
+        );
+        Ok(api_stream)
     }
 
     pub async fn project_create_vpc(
@@ -1056,7 +1066,7 @@
             .db_datastore
             .project_create_vpc(&id, &project_id, params)
             .await?;
-        Ok(vpc)
+        Ok(vpc.into())
     }
 
     pub async fn project_lookup_vpc(
@@ -1066,7 +1076,11 @@
     ) -> LookupResult<Vpc> {
         let project_id =
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
-        self.db_datastore.vpc_fetch_by_name(&project_id, vpc_name).await
+        Ok(self
+            .db_datastore
+            .vpc_fetch_by_name(&project_id, vpc_name)
+            .await?
+            .into())
     }
 
     pub async fn project_update_vpc(
@@ -1079,7 +1093,10 @@
             self.db_datastore.project_lookup_id_by_name(project_name).await?;
         let vpc =
             self.db_datastore.vpc_fetch_by_name(&project_id, vpc_name).await?;
-        self.db_datastore.project_update_vpc(&vpc.identity.id, params).await
+        Ok(self
+            .db_datastore
+            .project_update_vpc(&vpc.identity.id, params)
+            .await?)
     }
 
     pub async fn project_delete_vpc(
@@ -1095,14 +1112,14 @@
      * Racks.  We simulate just one for now.
      */
 
-    fn as_rack(&self) -> Rack {
-        Rack { identity: self.api_rack_identity.clone() }
+    fn as_rack(&self) -> db::model::Rack {
+        db::model::Rack { identity: self.api_rack_identity.clone().into() }
     }
 
     pub async fn racks_list(
         &self,
         pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResult<Rack> {
+    ) -> ListResult<db::model::Rack> {
         if let Some(marker) = pagparams.marker {
             if *marker >= self.rack_id {
                 return Ok(futures::stream::empty().boxed());
@@ -1112,7 +1129,10 @@
         Ok(futures::stream::once(ready(Ok(self.as_rack()))).boxed())
     }
 
-    pub async fn rack_lookup(&self, rack_id: &Uuid) -> LookupResult<Rack> {
+    pub async fn rack_lookup(
+        &self,
+        rack_id: &Uuid,
+    ) -> LookupResult<db::model::Rack> {
         if *rack_id == self.rack_id {
             Ok(self.as_rack())
         } else {
@@ -1130,14 +1150,14 @@
     pub async fn sleds_list(
         &self,
         pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResult<Sled> {
+    ) -> ListResult<db::model::Sled> {
         let sled_agents = self.sled_agents.lock().await;
         let sleds = collection_page(&sled_agents, pagparams)?
            .filter(|maybe_object| ready(maybe_object.is_ok()))
             .map(|sa| {
                 let sa = sa.unwrap();
-                Ok(Sled {
-                    identity: IdentityMetadata {
+                Ok(db::model::Sled {
+                    identity: db::model::IdentityMetadata {
                         /* TODO-correctness cons up real metadata here */
                         id: sa.id,
                         name: Name::try_from(format!("sled-{}", sa.id))
                             .unwrap(),
@@ -1149,19 +1169,22 @@
                     service_address: sa.service_address,
                 })
             })
-            .collect::<Vec<Result<Sled, Error>>>()
+            .collect::<Vec<Result<db::model::Sled, Error>>>()
             .await;
 
         Ok(futures::stream::iter(sleds).boxed())
     }
 
-    pub async fn sled_lookup(&self, sled_id: &Uuid) -> LookupResult<Sled> {
+    pub async fn sled_lookup(
+        &self,
+        sled_id: &Uuid,
+    ) -> LookupResult<db::model::Sled> {
         let nexuses = self.sled_agents.lock().await;
         let sa = nexuses.get(sled_id).ok_or_else(|| {
             Error::not_found_by_id(ResourceType::Sled, sled_id)
         })?;
 
-        Ok(Sled {
-            identity: IdentityMetadata {
+        Ok(db::model::Sled {
+            identity: db::model::IdentityMetadata {
                 /* TODO-correctness cons up real metadata here */
                 id: sa.id,
                 name: Name::try_from(format!("sled-{}", sa.id)).unwrap(),
@@ -1221,13 +1244,13 @@
     pub async fn notify_instance_updated(
         &self,
         id: &Uuid,
-        new_runtime_state: &InstanceRuntimeState,
+        new_runtime_state: &nexus::InstanceRuntimeState,
     ) -> Result<(), Error> {
         let log = &self.log;
 
         let result = self
             .db_datastore
-            .instance_update_runtime(id, new_runtime_state)
+            .instance_update_runtime(id, &(new_runtime_state.clone().into()))
             .await;
 
         match result {
@@ -1283,7 +1306,10 @@
     ) -> Result<(), Error> {
         let log = &self.log;
 
-        let result = self.db_datastore.disk_update_runtime(id, new_state).await;
+        let result = self
+            .db_datastore
+            .disk_update_runtime(id, &new_state.clone().into())
+            .await;
 
         /* TODO-cleanup commonize with notify_instance_updated() */
         match result {
@@ -1336,7 +1362,8 @@
         &self,
         producer_info: ProducerEndpoint,
     ) -> Result<(), Error> {
-        self.db_datastore.producer_endpoint_create(&producer_info).await?;
+        let db_info = db::model::ProducerEndpoint::new(&producer_info);
+        self.db_datastore.producer_endpoint_create(&db_info).await?;
         let collector = self.next_collector().await?;
         collector.register_producer(&producer_info).await?;
         self.db_datastore
diff --git a/omicron-nexus/src/sagas.rs b/omicron-nexus/src/sagas.rs
index 65604b926b..6d8dda8ea5 100644
--- a/omicron-nexus/src/sagas.rs
+++ b/omicron-nexus/src/sagas.rs
@@ -144,11 +144,11 @@ async fn sic_create_instance_record(
             &instance_id?,
             &params.project_id,
             &params.create_params,
-            &runtime,
+            &runtime.into(),
         )
         .await
         .map_err(ActionError::action_failed)?;
 
-    Ok(instance.runtime)
+    Ok(instance.runtime.into())
 }
 
 async fn sic_instance_ensure(
@@ -182,7 +182,7 @@
     osagactx
         .datastore()
-        .instance_update_runtime(&instance_id, &new_runtime_state)
+        .instance_update_runtime(&instance_id, &new_runtime_state.into())
         .await
         .map(|_| ())
         .map_err(ActionError::action_failed)
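
Both the saga and the notify paths above lean on paired conversions between each runtime-state model type and its internal API twin, one `From` impl per direction. A sketch for the disk case, using only the three fields visible in this diff (hypothetical; the instance case would mirror it field for field):

    use omicron_common::api::internal::nexus;

    // Hypothetical: db model -> internal API runtime state. The reverse
    // impl (used by notify_disk_updated's `new_state.clone().into()`)
    // would wrap disk_state back up with db::model::DiskState::new().
    impl From<db::model::DiskRuntimeState> for nexus::DiskRuntimeState {
        fn from(runtime: db::model::DiskRuntimeState) -> Self {
            nexus::DiskRuntimeState {
                disk_state: runtime.disk_state.into(),
                gen: runtime.gen,
                time_updated: runtime.time_updated,
            }
        }
    }
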