diff --git a/Cargo.lock b/Cargo.lock
index 4a4684ef720..088f71a75f2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8118,6 +8118,7 @@ name = "oximeter-db"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "assert_matches",
  "async-recursion",
  "async-trait",
  "bcs",
diff --git a/nexus/src/app/metrics.rs b/nexus/src/app/metrics.rs
index 5b77e681b13..040c070168f 100644
--- a/nexus/src/app/metrics.rs
+++ b/nexus/src/app/metrics.rs
@@ -14,7 +14,9 @@ use nexus_db_queries::{
 use nexus_external_api::TimeseriesSchemaPaginationParams;
 use nexus_types::external_api::params::SystemMetricName;
 use omicron_common::api::external::{Error, InternalContext};
-use oximeter_db::{Measurement, TimeseriesSchema};
+use oximeter_db::{
+    Measurement, TimeseriesSchema, oxql::query::QueryAuthzScope,
+};
 use std::num::NonZeroU32;

 impl super::Nexus {
@@ -138,7 +140,7 @@ impl super::Nexus {
         // resources they have access to.
         opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
         self.timeseries_client
-            .oxql_query(query)
+            .oxql_query(query, QueryAuthzScope::Fleet)
             .await
             // TODO-observability: The query method returns information
             // about the duration of the OxQL query and the database
@@ -161,17 +163,14 @@ impl super::Nexus {
         // Ensure the user has read access to the project
         let (authz_silo, authz_project) =
             project_lookup.lookup_for(authz::Action::Read).await?;
-
-        // Ensure the query only refers to the project
-        let filtered_query = format!(
-            "{} | filter silo_id == \"{}\" && project_id == \"{}\"",
-            query.as_ref(),
-            authz_silo.id(),
-            authz_project.id()
-        );
-
         self.timeseries_client
-            .oxql_query(filtered_query)
+            .oxql_query(
+                query,
+                QueryAuthzScope::Project {
+                    silo_id: authz_silo.id(),
+                    project_id: authz_project.id(),
+                },
+            )
             .await
             .map(|result| result.tables)
             .map_err(map_timeseries_err)
diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs
index 8d664f06f08..e1125b83258 100644
--- a/nexus/tests/integration_tests/metrics.rs
+++ b/nexus/tests/integration_tests/metrics.rs
@@ -472,8 +472,10 @@ async fn test_project_timeseries_query(
     let _p2 = create_project(&client, "project2").await;

     // Create resources in each project
-    let i1 = create_instance(&client, "project1", "instance1").await;
-    let _i2 = create_instance(&client, "project2", "instance2").await;
+    let i1p1 = create_instance(&client, "project1", "instance1").await;
+    // need a second instance to test group_by
+    let i2p1 = create_instance(&client, "project1", "instance2").await;
+    let _i3p2 = create_instance(&client, "project2", "instance3").await;

     let internal_client = &cptestctx.internal_client;

@@ -520,14 +522,16 @@ async fn test_project_timeseries_query(
     let result =
         metrics_querier.project_timeseries_query("project1", q2).await;
     assert_eq!(result.len(), 1);
-    assert!(result[0].timeseries().len() > 0);
+    // we get 2 timeseries because there are two instances
+    assert!(result[0].timeseries().len() == 2);

     let result =
         metrics_querier.project_timeseries_query("project2", q2).await;
     assert_eq!(result.len(), 1);
     assert_eq!(result[0].timeseries().len(), 0);

     // with instance specified
-    let q3 = &format!("{} | filter instance_id == \"{}\"", q1, i1.identity.id);
+    let q3 =
+        &format!("{} | filter instance_id == \"{}\"", q1, i1p1.identity.id);

     // project containing instance gives me something
     let result =
         metrics_querier.project_timeseries_query("project1", q3).await;
@@ -539,11 +543,43 @@ async fn test_project_timeseries_query(
     assert_eq!(result.len(), 1);
     assert_eq!(result[0].timeseries().len(), 0);

+    // now let's test it with group_by
+    let q4 = &format!(
+        "{} | align mean_within(1m) | group_by [instance_id], sum",
+        q1
+    );
+    let result = metrics_querier.project_timeseries_query("project1", q4).await;
+    assert_eq!(result.len(), 1);
+    assert_eq!(result[0].timeseries().len(), 2);
+
+    // test with a nested query
+    let q5 = &format!(
+        "{{ \
+        get virtual_machine:check | filter instance_id == \"{}\"; \
+        get virtual_machine:check | filter instance_id == \"{}\" \
+        }} | filter timestamp < @now()",
+        i1p1.identity.id, i2p1.identity.id,
+    );
+    let result = metrics_querier.project_timeseries_query("project1", q5).await;
+
+    // we get two results, each contains one timeseries, and the instance ID
+    // on each corresponds to the one we requested
+    assert_eq!(result.len(), 2);
+    assert_eq!(result[0].timeseries().len(), 1);
+    let timeseries = result[0].timeseries().next().unwrap();
+    let instance_id = timeseries.fields.get("instance_id").unwrap().to_string();
+    assert_eq!(instance_id, i1p1.identity.id.to_string());
+
+    assert_eq!(result[1].timeseries().len(), 1);
+    let timeseries = result[1].timeseries().next().unwrap();
+    let instance_id = timeseries.fields.get("instance_id").unwrap().to_string();
+    assert_eq!(instance_id, i2p1.identity.id.to_string());
+
     // expect error when querying a metric that has no project_id on it
-    let q4 = "get integration_target:integration_metric";
+    let q6 = "get integration_target:integration_metric";
     let url = "/v1/timeseries/query?project=project1";
     let body = nexus_types::external_api::params::TimeseriesQuery {
-        query: q4.to_string(),
+        query: q6.to_string(),
     };
     let result =
         object_create_error(client, url, &body, StatusCode::BAD_REQUEST).await;
@@ -556,14 +592,14 @@ async fn test_project_timeseries_query(
         The filter expression refers to \
         identifiers that are not valid for its input \
         table \"integration_target:integration_metric\". \
-        Invalid identifiers: [\"silo_id\", \"project_id\"], \
+        Invalid identifiers: [\"silo_id\"], \
         valid identifiers: [\"datum\", \"metric_name\", \"target_name\", \"timestamp\"]";
     assert!(result.message.ends_with(EXPECTED_ERROR_MESSAGE));

     // nonexistent project
     let url = "/v1/timeseries/query?project=nonexistent";
     let body = nexus_types::external_api::params::TimeseriesQuery {
-        query: q4.to_string(),
+        query: q6.to_string(),
     };
     let result =
         object_create_error(client, url, &body, StatusCode::NOT_FOUND).await;
@@ -606,7 +642,7 @@ async fn test_project_timeseries_query(
         .execute_and_parse_unwrap::()
         .await;
     assert_eq!(result.tables.len(), 1);
-    assert_eq!(result.tables[0].timeseries().len(), 1);
+    assert_eq!(result.tables[0].timeseries().len(), 2); // two instances
 }

 #[nexus_test]
diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml
index 759f6334a8e..c7bdd69aa4f 100644
--- a/oximeter/db/Cargo.toml
+++ b/oximeter/db/Cargo.toml
@@ -104,6 +104,7 @@ nom.workspace = true
 quote.workspace = true

 [dev-dependencies]
+assert_matches.workspace = true
 camino-tempfile.workspace = true
 criterion = { workspace = true, features = [ "async_tokio" ] }
 expectorate.workspace = true
diff --git a/oximeter/db/src/client/oxql.rs b/oximeter/db/src/client/oxql.rs
index 520b653a878..92c4003307c 100644
--- a/oximeter/db/src/client/oxql.rs
+++ b/oximeter/db/src/client/oxql.rs
@@ -20,6 +20,7 @@ use crate::oxql::ast::table_ops::filter;
 use crate::oxql::ast::table_ops::filter::Filter;
 use crate::oxql::ast::table_ops::limit::Limit;
 use crate::oxql::ast::table_ops::limit::LimitKind;
+use crate::oxql::query::QueryAuthzScope;
 use crate::query::field_table_name;
 use oximeter::Measurement;
 use oximeter::TimeseriesSchema;
@@ -147,22 +148,13 @@ impl Client {
     pub async fn oxql_query(
         &self,
         query: impl AsRef<str>,
+        scope: QueryAuthzScope,
     ) -> Result<OxqlResult, Error> {
-        // TODO-security: Need a way to implement authz checks for things like
-        // viewing resources in another project or silo.
-        //
-        // I think one way to do that is look at the predicates and make sure
-        // they refer to things the user has access to. Another is to add some
-        // implicit predicates here, indicating the subset of fields that the
-        // query should be able to access.
-        //
-        // This probably means we'll need to parse the query in Nexus, so that
-        // we can attach the other filters ourselves.
-        //
-        // See https://github.com/oxidecomputer/omicron/issues/5298.
         let query = query.as_ref();
         let parsed_query = oxql::Query::new(query)?;
-        let plan = self.build_query_plan(&parsed_query).await?;
+        let filtered_query = parsed_query.insert_authz_filters(scope);
+
+        let plan = self.build_query_plan(&filtered_query).await?;
         if plan.requires_full_table_scan() {
             return Err(Error::Oxql(anyhow::anyhow!(
                 "This query requires at least one full table scan. \
@@ -179,6 +171,7 @@
             "parsed OxQL query";
             "query" => query,
             "parsed_query" => ?parsed_query,
+            "filtered_query" => ?filtered_query,
         );
         let id = usdt::UniqueId::new();
         probes::oxql__query__start!(|| (&id, &query_id, query));
@@ -188,7 +181,7 @@
             &query_log,
             &mut self.claim_connection().await?,
             query_id,
-            parsed_query,
+            filtered_query,
             &mut total_rows_fetched,
             None,
             None,
@@ -1187,7 +1180,9 @@ fn update_total_rows_and_check
 #[cfg(test)]
 mod tests {
     use super::ConsistentKeyGroup;
-    use crate::client::oxql::chunk_consistent_key_groups_impl;
+    use crate::client::oxql::{
+        QueryAuthzScope, chunk_consistent_key_groups_impl,
+    };
     use crate::oxql::ast::grammar::query_parser;
     use crate::{Client, DATABASE_TIMESTAMP_FORMAT, DbWrite};
     use crate::{Metric, Target};
@@ -1348,7 +1343,7 @@
             "get some_target:some_metric | filter timestamp > @2020-01-01";
         let result = ctx
             .client
-            .oxql_query(query)
+            .oxql_query(query, QueryAuthzScope::Fleet)
             .await
             .expect("failed to run OxQL query");
         assert_eq!(result.tables.len(), 1, "Should be exactly 1 table");
@@ -1407,7 +1402,7 @@
         );
         let result = ctx
             .client
-            .oxql_query(&query)
+            .oxql_query(&query, QueryAuthzScope::Fleet)
             .await
             .expect("failed to run OxQL query");
         assert_eq!(result.tables.len(), 1, "Should be exactly 1 table");
@@ -1463,7 +1458,7 @@
         );
         let result = ctx
             .client
-            .oxql_query(&query)
+            .oxql_query(&query, QueryAuthzScope::Fleet)
             .await
             .expect("failed to run OxQL query");
         assert_eq!(result.tables.len(), 1, "Should be exactly 1 table");
@@ -1695,7 +1690,7 @@
         );
         let result = ctx
             .client
-            .oxql_query(&query)
+            .oxql_query(&query, QueryAuthzScope::Fleet)
             .await
             .expect("failed to run OxQL query");
         assert_eq!(result.tables.len(), 1, "Should be exactly 1 table");
diff --git a/oximeter/db/src/oxql/ast/mod.rs b/oximeter/db/src/oxql/ast/mod.rs
index 363b84126ad..232616614dd 100644
--- a/oximeter/db/src/oxql/ast/mod.rs
+++ b/oximeter/db/src/oxql/ast/mod.rs
@@ -12,6 +12,7 @@ use std::fmt;
 use chrono::DateTime;
 use chrono::Utc;
 use oximeter::TimeseriesName;
+use table_ops::filter::Filter;

 use self::table_ops::BasicTableOp;
 use self::table_ops::GroupedTableOp;
@@ -44,7 +45,7 @@ impl fmt::Display for Query {

 impl Query {
     // Return the first operation in the query, which is always a form of `get`.
-    fn first_op(&self) -> &TableOp {
+    pub(crate) fn first_op(&self) -> &TableOp {
         self.ops.first().expect("Should have parsed at least 1 operation")
     }

@@ -188,6 +189,37 @@ impl Query {
             }
         }
     }
+
+    /// Insert filters after the `get`, or in the case of subqueries, recurse
+    /// down the tree and insert them after each get.
+    pub(crate) fn insert_filters(&self, filters: Vec<Filter>) -> Self {
+        let mut new_ops = self.ops.clone();
+
+        match self.first_op() {
+            // for a basic query, just insert the filters after the first entry (the get)
+            TableOp::Basic(_) => {
+                let filter_ops = filters
+                    .iter()
+                    .map(|filter| {
+                        TableOp::Basic(BasicTableOp::Filter(filter.clone()))
+                    })
+                    .collect::<Vec<_>>();
+                new_ops.splice(1..1, filter_ops);
+            }
+            // for a grouped query, recurse to insert the filters in all subqueries
+            TableOp::Grouped(op) => {
+                new_ops[0] = TableOp::Grouped(GroupedTableOp {
+                    ops: op
+                        .ops
+                        .iter()
+                        .map(|query| query.insert_filters(filters.clone()))
+                        .collect(),
+                });
+            }
+        }
+
+        Self { ops: new_ops }
+    }
 }

 // Either a flat query or one with nested subqueries.
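To make the effect of the new `insert_filters` hook concrete, here is a small sketch (not part of the diff) of the rewrite it enables. It mirrors the `test_insert_filters` unit test added in `oximeter/db/src/oxql/query/mod.rs` below and assumes it lives inside that same `mod tests`, since `insert_authz_filters` and the `parsed` field are crate-internal; the timeseries name and UUIDs are illustrative only.

```rust
#[test]
fn sketch_project_scope_rewrite() {
    let query = Query::new("get a:b | filter timestamp > @now()").unwrap();
    let silo_id = Uuid::new_v4();
    let project_id = Uuid::new_v4();

    // Project scope splices two `filter` table ops directly after the `get`
    // (or after each `get` of a grouped subquery), so the query behaves as if
    // the caller had written:
    //   get a:b
    //     | filter (silo_id == "<silo_id>")
    //     | filter (project_id == "<project_id>")
    //     | filter timestamp > @now()
    let scoped = query
        .insert_authz_filters(QueryAuthzScope::Project { silo_id, project_id });

    assert_eq!(scoped.parsed.table_ops().len(), 4);
    assert_eq!(
        scoped.parsed.table_ops().nth(1).unwrap().to_string(),
        format!("filter (silo_id == \"{}\")", silo_id)
    );
}
```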
diff --git a/oximeter/db/src/oxql/query/mod.rs b/oximeter/db/src/oxql/query/mod.rs
index 13fed9bdf67..824b109ff92 100644
--- a/oximeter/db/src/oxql/query/mod.rs
+++ b/oximeter/db/src/oxql/query/mod.rs
@@ -9,12 +9,15 @@ use std::collections::BTreeSet;

 use super::ast::SplitQuery;
+use super::ast::cmp::Comparison;
 use super::ast::ident::Ident;
+use super::ast::literal::Literal;
 use super::ast::logical_op::LogicalOp;
 use super::ast::table_ops::BasicTableOp;
 use super::ast::table_ops::TableOp;
 use super::ast::table_ops::filter::CompoundFilter;
 use super::ast::table_ops::filter::FilterExpr;
+use super::ast::table_ops::filter::SimpleFilter;
 use super::ast::table_ops::group_by::GroupBy;
 use super::ast::table_ops::limit::Limit;
 use crate::TimeseriesName;
@@ -25,6 +28,7 @@ use crate::oxql::ast::table_ops::filter::Filter;
 use crate::oxql::fmt_parse_error;
 use chrono::DateTime;
 use chrono::Utc;
+use uuid::Uuid;

 /// A parsed OxQL query.
 #[derive(Clone, Debug, PartialEq)]
 pub struct Query {
     pub(super) end_time: DateTime<Utc>,
 }

+pub enum QueryAuthzScope {
+    Fleet,
+    Silo { silo_id: Uuid },
+    Project { silo_id: Uuid, project_id: Uuid },
+}
+
 impl Query {
     /// Construct a query written in OxQL.
     pub fn new(query: impl AsRef<str>) -> Result<Self, Error> {
@@ -362,6 +372,35 @@ impl Query {
     pub(crate) fn parsed_query(&self) -> &QueryNode {
         &self.parsed
     }
+
+    /// Insert silo and project filters after the `get`, or in the case of
+    /// subqueries, recurse down the tree and insert them after each get.
+    pub(crate) fn insert_authz_filters(&self, scope: QueryAuthzScope) -> Self {
+        let filtered_query = match scope {
+            QueryAuthzScope::Fleet => self.parsed.clone(),
+            QueryAuthzScope::Silo { silo_id } => self
+                .parsed
+                .insert_filters(vec![uuid_eq_filter("silo_id", silo_id)]),
+            QueryAuthzScope::Project { silo_id, project_id } => {
+                self.parsed.insert_filters(vec![
+                    uuid_eq_filter("silo_id", silo_id),
+                    uuid_eq_filter("project_id", project_id),
+                ])
+            }
+        };
+        Self { parsed: filtered_query, end_time: self.end_time }
+    }
+}
+
+/// Just a helper for creating a UUID filter node concisely
+fn uuid_eq_filter(key: impl AsRef<str>, id: Uuid) -> Filter {
+    let simple_filter = SimpleFilter {
+        ident: Ident(key.as_ref().to_string()),
+        cmp: Comparison::Eq,
+        value: Literal::Uuid(id),
+    };
+    let filter_expr = FilterExpr::Simple(simple_filter);
+    Filter { negated: false, expr: filter_expr }
 }

 // Return a new filter containing only parts that refer to either:
@@ -412,6 +451,7 @@ mod tests {
     use crate::oxql::ast::literal::Literal;
     use crate::oxql::ast::logical_op::LogicalOp;
     use crate::oxql::ast::table_ops::BasicTableOp;
+    use crate::oxql::ast::table_ops::GroupedTableOp;
     use crate::oxql::ast::table_ops::TableOp;
     use crate::oxql::ast::table_ops::filter::CompoundFilter;
     use crate::oxql::ast::table_ops::filter::FilterExpr;
     use crate::oxql::ast::table_ops::join::Join;
     use crate::oxql::ast::table_ops::limit::Limit;
     use crate::oxql::ast::table_ops::limit::LimitKind;
+    use crate::oxql::query::QueryAuthzScope;
     use crate::oxql::query::restrict_filter_idents;
+    use crate::oxql::query::uuid_eq_filter;
+    use assert_matches::assert_matches;
     use chrono::NaiveDateTime;
     use chrono::Utc;
     use std::time::Duration;
+    use uuid::Uuid;

     #[test]
     fn test_restrict_filter_idents_single_atom() {
@@ -1006,4 +1050,154 @@ mod tests {
             inner query contains an incompatible timestamp filter"
         );
     }
+
+    #[test]
+    fn test_insert_filters() {
+        let query = Query::new("get a:b | filter timestamp > @now()").unwrap();
+        let silo_id = Uuid::new_v4();
+        let project_id = Uuid::new_v4();
+        let scope = QueryAuthzScope::Project { silo_id, project_id };
+        let new_query = query.insert_authz_filters(scope);
+
+        assert_eq!(query.parsed.table_ops().len(), 2);
+        assert_eq!(new_query.parsed.table_ops().len(), 4);
+
+        // inserted after the get
+        assert_eq!(
+            new_query.parsed.table_ops().nth(1).unwrap().to_string(),
+            format!("filter (silo_id == \"{}\")", silo_id)
+        );
+        assert_eq!(
+            new_query.parsed.table_ops().nth(2).unwrap().to_string(),
+            format!("filter (project_id == \"{}\")", project_id)
+        );
+    }
+
+    #[test]
+    fn test_insert_filters_with_subqueries() {
+        let query = Query::new(
+            "{ get a:b | filter timestamp > @2025-03-05; get c:d } | filter timestamp > @2025-01-01",
+        )
+        .unwrap();
+
+        let silo_id = Uuid::new_v4();
+        let project_id = Uuid::new_v4();
+
+        // Define expected filters as AST nodes
+        let silo_filter = uuid_eq_filter("silo_id", silo_id);
+        let project_filter = uuid_eq_filter("project_id", project_id);
+
+        let expected_silo_op =
+            TableOp::Basic(BasicTableOp::Filter(silo_filter.clone()));
+        let expected_project_op =
+            TableOp::Basic(BasicTableOp::Filter(project_filter.clone()));
+
+        let scope = QueryAuthzScope::Project { silo_id, project_id };
+        let new_query = query.insert_authz_filters(scope);
+
+        // Check top-level structure (should remain one grouped op and one filter)
+        let orig_ops = query.parsed.table_ops().collect::<Vec<_>>();
+        assert_eq!(orig_ops.len(), 2);
+        assert_matches!(orig_ops[1], TableOp::Basic(BasicTableOp::Filter(_)));
+
+        let new_ops = new_query.parsed.table_ops().collect::<Vec<_>>();
+        assert_eq!(new_ops.len(), 2);
+
+        // second filter op unchanged
+        assert_eq!(orig_ops[1], new_ops[1]);
+
+        let only_op = new_query.parsed.first_op();
+        let TableOp::Grouped(GroupedTableOp { ops }) = only_op else {
+            panic!("Expected the only operation to be TableOp::Grouped");
+        };
+
+        assert_eq!(ops.len(), 2, "Expected two subqueries in the group");
+
+        // first subquery has the original get and filter and now two extra filters in the middle
+        let subq1: Vec<_> = ops[0].table_ops().cloned().collect();
+        assert_eq!(subq1.len(), 4);
+        assert_matches!(subq1[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(subq1[1], expected_silo_op);
+        assert_eq!(subq1[2], expected_project_op);
+        assert_matches!(subq1[3], TableOp::Basic(BasicTableOp::Filter(_)));
+
+        // second subquery has the original get and now two extra filters
+        let subq2: Vec<_> = ops[1].table_ops().cloned().collect();
+        assert_eq!(subq2.len(), 3);
+        assert_matches!(subq2[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(subq2[1], expected_silo_op);
+        assert_eq!(subq2[2], expected_project_op);
+    }
+
+    #[test]
+    fn test_insert_filters_with_nested_subqueries() {
+        let query_str = "{ get a:b | filter timestamp > @2025-03-05; { get c:d; get e:f | filter timestamp < @2025-04-06 }; get g:h }";
+        let query = Query::new(query_str).unwrap();
+        let silo_id = Uuid::new_v4();
+        let project_id = Uuid::new_v4();
+
+        // Define expected filters as AST nodes
+        let silo_filter = uuid_eq_filter("silo_id", silo_id);
+        let project_filter = uuid_eq_filter("project_id", project_id);
+
+        let expected_silo_op =
+            TableOp::Basic(BasicTableOp::Filter(silo_filter.clone()));
+        let expected_project_op =
+            TableOp::Basic(BasicTableOp::Filter(project_filter.clone()));
+
+        let scope = QueryAuthzScope::Project { silo_id, project_id };
+        let new_query = query.insert_authz_filters(scope);
+
+        // Check top-level structure (should remain a single grouped op)
+        assert_eq!(query.parsed.table_ops().len(), 1);
+        assert_eq!(new_query.parsed.table_ops().len(), 1);
+
+        let top_op = new_query.parsed.first_op();
+        let TableOp::Grouped(GroupedTableOp { ops: top_ops }) = top_op else {
+            panic!("Expected the top operation to be TableOp::Grouped");
+        };
+
+        assert_eq!(top_ops.len(), 3);
+
+        // Check first subquery (get a:b | filter ...)
+        let subq1: Vec<_> = top_ops[0].table_ops().cloned().collect();
+        assert_eq!(subq1.len(), 4, "Expected 4 ops in subquery 1");
+        assert_matches!(subq1[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(subq1[1], expected_silo_op);
+        assert_eq!(subq1[2], expected_project_op);
+        assert_matches!(subq1[3], TableOp::Basic(BasicTableOp::Filter(_))); // Original filter
+
+        // Check second subquery (the nested group { get c:d; get e:f | filter ... })
+        let nested_ops = &top_ops[1].table_ops().collect::<Vec<_>>();
+        assert_eq!(nested_ops.len(), 1);
+
+        let TableOp::Grouped(GroupedTableOp { ops: nested_queries }) =
+            nested_ops[0]
+        else {
+            panic!("Expected the top operation to be TableOp::Grouped");
+        };
+        let nested_subq1 = nested_queries[0].table_ops().collect::<Vec<_>>();
+        assert_eq!(nested_subq1.len(), 3);
+        assert_matches!(nested_subq1[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(nested_subq1[1], &expected_silo_op);
+        assert_eq!(nested_subq1[2], &expected_project_op);
+
+        // Check second nested subquery (get e:f | filter ...)
+        let nested_subq2 = nested_queries[1].table_ops().collect::<Vec<_>>();
+        assert_eq!(nested_subq2.len(), 4);
+        assert_matches!(nested_subq2[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(nested_subq2[1], &expected_silo_op);
+        assert_eq!(nested_subq2[2], &expected_project_op);
+        assert_matches!(
+            nested_subq2[3],
+            TableOp::Basic(BasicTableOp::Filter(_))
+        ); // Original filter
+
+        // Check third subquery (get g:h)
+        let subq3: Vec<_> = top_ops[2].table_ops().cloned().collect();
+        assert_eq!(subq3.len(), 3, "Expected 3 ops in subquery 3");
+        assert_matches!(subq3[0], TableOp::Basic(BasicTableOp::Get(_)));
+        assert_eq!(subq3[1], expected_silo_op);
+        assert_eq!(subq3[2], expected_project_op);
+    }
 }
diff --git a/oximeter/db/src/shells/oxql.rs b/oximeter/db/src/shells/oxql.rs
index 59051c03058..75bf3a344f7 100644
--- a/oximeter/db/src/shells/oxql.rs
+++ b/oximeter/db/src/shells/oxql.rs
@@ -7,7 +7,7 @@
 // Copyright 2024 Oxide Computer

 use super::{list_timeseries, prepare_columns};
-use crate::{Client, OxqlResult, make_client};
+use crate::{Client, OxqlResult, make_client, oxql::query::QueryAuthzScope};
 use clap::Args;
 use crossterm::style::Stylize;
 use oxql_types::Table;
@@ -126,7 +126,10 @@ pub async fn shell(
             }
         } else {
             match client
-                .oxql_query(cmd.trim().trim_end_matches(';'))
+                .oxql_query(
+                    cmd.trim().trim_end_matches(';'),
+                    QueryAuthzScope::Fleet,
+                )
                 .await
             {
                 Ok(result) => {
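Outside of Nexus, callers that legitimately want an unscoped query now have to say so explicitly. A rough caller-side sketch, assuming an already-constructed `oximeter_db::Client` and an async context (the helper and variable names here are made up); with `QueryAuthzScope::Fleet` no filters are inserted, so operator tooling like the shell above and the cluster test below behave exactly as before:

```rust
use oximeter_db::{Client, Error, oxql::query::QueryAuthzScope};

// Hypothetical helper showing the new call shape for fleet-scoped queries.
async fn fleet_scoped_query(client: &Client) -> Result<(), Error> {
    let result = client
        .oxql_query(
            "get virtual_machine:cpu_busy | filter timestamp > @2000-01-01",
            QueryAuthzScope::Fleet,
        )
        .await?;
    println!("query returned {} table(s)", result.tables.len());
    Ok(())
}
```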
diff --git a/oximeter/db/tests/integration_test.rs b/oximeter/db/tests/integration_test.rs
index 0a0ffeef62d..1de904f14b7 100644
--- a/oximeter/db/tests/integration_test.rs
+++ b/oximeter/db/tests/integration_test.rs
@@ -7,6 +7,7 @@ use clickward::{BasePorts, Deployment, DeploymentConfig, KeeperId};
 use dropshot::test_util::log_prefix_for_test;
 use omicron_test_utils::dev::poll;
 use omicron_test_utils::dev::test_setup_log;
+use oximeter_db::oxql::query::QueryAuthzScope;
 use oximeter_db::{Client, DbWrite, OxqlResult, Sample, TestDbWrite};
 use oximeter_test_utils::wait_for_keepers;
 use slog::{Logger, info};
@@ -213,6 +214,7 @@ async fn test_cluster() -> anyhow::Result<()> {
     let oxql_res1 = client1
         .oxql_query(
             "get virtual_machine:cpu_busy | filter timestamp > @2000-01-01",
+            QueryAuthzScope::Fleet,
         )
         .await
         .expect("failed to get all samples");
@@ -427,7 +429,9 @@ async fn wait_for_num_points(
     poll::wait_for_condition(
         || async {
             let oxql_res = client
-                .oxql_query("get virtual_machine:cpu_busy | filter timestamp > @2000-01-01")
+                .oxql_query(
+                    "get virtual_machine:cpu_busy | filter timestamp > @2000-01-01",
+                    QueryAuthzScope::Fleet)
                 .await
                 .map_err(|_| {
                     poll::CondCheckError::::NotYet