Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upgrade rust to 1.57 #4723

Merged
merged 7 commits into from
Dec 20, 2021
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 0 additions & 9 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -364,15 +364,6 @@ jobs:
- store_artifacts:
path: raw_xcodebuild.log
destination: logs/raw_xcodebuild.log
- run:
name: Install Rust Nightly
command: |
# For now we need the nightly toolchain to build for the M1 simulator.
rustup install nightly
# For now we need to build the Rust stdlib from source for the M1 simulator.
rustup component add rust-src --toolchain nightly-x86_64-apple-darwin
rustup toolchain add nightly --profile minimal
rustup target add aarch64-apple-ios-sim --toolchain nightly
- run:
name: Build XCFramework archive
command: |
Expand Down
2 changes: 1 addition & 1 deletion components/autofill/src/db/addresses.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ pub(crate) fn get_address(conn: &Connection, guid: &Guid) -> Result<InternalAddr
WHERE guid = :guid",
common_cols = ADDRESS_COMMON_COLS
);
conn.query_row(&sql, &[guid], |row| InternalAddress::from_row(row))
conn.query_row(&sql, &[guid], InternalAddress::from_row)
.map_err(|e| match e {
rusqlite::Error::QueryReturnedNoRows => Error::NoSuchRecord(guid.to_string()),
e => e.into(),
Expand Down
3 changes: 3 additions & 0 deletions components/fxa-client/src/internal/push.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,9 @@ pub enum PushPayload {
Unknown,
}

// Some of this struct's fields are never read directly — they are only
// populated during deserialization — so we mark them as dead_code.
#[allow(dead_code)]
#[derive(Debug, Deserialize)]
pub struct CommandReceivedPushPayload {
command: String,
Expand Down
2 changes: 1 addition & 1 deletion components/logins/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use std::ffi::OsString;
// into `error_support`.
macro_rules! throw {
($e:expr) => {
return Err(Into::into($e));
return Err(Into::into($e))
};
}

Expand Down
22 changes: 11 additions & 11 deletions components/nimbus/src/persistence.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,24 +141,24 @@ impl SingleStore {

pub fn put<T: serde::Serialize + for<'de> serde::Deserialize<'de>>(
&self,
mut writer: &mut Writer,
writer: &mut Writer,
key: &str,
persisted_data: &T,
) -> Result<()> {
let persisted_json = serde_json::to_string(persisted_data)?;
self.store
.put(&mut writer, key, &rkv::Value::Json(&persisted_json))?;
.put(writer, key, &rkv::Value::Json(&persisted_json))?;
Ok(())
}

#[allow(dead_code)]
pub fn delete(&self, mut writer: &mut Writer, key: &str) -> Result<()> {
self.store.delete(&mut writer, key)?;
pub fn delete(&self, writer: &mut Writer, key: &str) -> Result<()> {
self.store.delete(writer, key)?;
Ok(())
}

pub fn clear(&self, mut writer: &mut Writer) -> Result<()> {
self.store.clear(&mut writer)?;
pub fn clear(&self, writer: &mut Writer) -> Result<()> {
self.store.clear(writer)?;
Ok(())
}

Expand Down Expand Up @@ -336,7 +336,7 @@ impl Database {
/// to assume that this is unrecoverable and wipe the database, removing
/// people from any existing enrollments and blowing away their experiment
/// history, so that they don't get left in an inconsistent state.
fn migrate_v1_to_v2(&self, mut writer: &mut Writer) -> Result<()> {
fn migrate_v1_to_v2(&self, writer: &mut Writer) -> Result<()> {
log::info!("Upgrading from version 1 to version 2");

// use try_collect_all to read everything except records that serde
Expand Down Expand Up @@ -392,16 +392,16 @@ impl Database {
log::debug!("updated enrollments = {:?}", updated_enrollments);

// rewrite both stores
self.experiment_store.clear(&mut writer)?;
self.experiment_store.clear(writer)?;
for experiment in updated_experiments {
self.experiment_store
.put(&mut writer, &experiment.slug, &experiment)?;
.put(writer, &experiment.slug, &experiment)?;
}

self.enrollment_store.clear(&mut writer)?;
self.enrollment_store.clear(writer)?;
for enrollment in updated_enrollments {
self.enrollment_store
.put(&mut writer, &enrollment.slug, &enrollment)?;
.put(writer, &enrollment.slug, &enrollment)?;
}
log::debug!("exiting migrate_v1_to_v2");

Expand Down
46 changes: 23 additions & 23 deletions components/places/src/bookmark_sync/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1929,36 +1929,36 @@ mod tests {
let node = tree
.node_for_guid(&"qqVTRWhLBOu3".into())
.expect("should exist");
assert_eq!(node.needs_merge, true);
assert!(node.needs_merge);
assert_eq!(node.validity, Validity::Valid);
assert_eq!(node.level(), 2);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());

let node = tree
.node_for_guid(&BookmarkRootGuid::Unfiled.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.needs_merge, true);
assert!(node.needs_merge);
assert_eq!(node.validity, Validity::Valid);
assert_eq!(node.level(), 1);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());

let node = tree
.node_for_guid(&BookmarkRootGuid::Menu.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.needs_merge, false);
assert!(!node.needs_merge);
assert_eq!(node.validity, Validity::Valid);
assert_eq!(node.level(), 1);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());

let node = tree
.node_for_guid(&BookmarkRootGuid::Root.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.validity, Validity::Valid);
assert_eq!(node.level(), 0);
assert_eq!(node.is_syncable(), false);
assert!(!node.is_syncable());

// We should have changes.
assert_eq!(db_has_changes(&conn).unwrap(), true);
assert!(db_has_changes(&conn).unwrap());
Ok(())
}

Expand Down Expand Up @@ -2003,38 +2003,38 @@ mod tests {
let node = tree
.node_for_guid(&"bookmark1___".into())
.expect("should exist");
assert_eq!(node.needs_merge, true);
assert!(node.needs_merge);
assert_eq!(node.level(), 2);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());
assert_eq!(node.age, 10000);

let node = tree
.node_for_guid(&BookmarkRootGuid::Unfiled.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.needs_merge, true);
assert!(node.needs_merge);
assert_eq!(node.level(), 1);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());

let node = tree
.node_for_guid(&BookmarkRootGuid::Menu.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.needs_merge, false);
assert!(!node.needs_merge);
assert_eq!(node.level(), 1);
assert_eq!(node.is_syncable(), true);
assert!(node.is_syncable());

let node = tree
.node_for_guid(&BookmarkRootGuid::Root.as_guid().as_str().into())
.expect("should exist");
assert_eq!(node.needs_merge, false);
assert!(!node.needs_merge);
assert_eq!(node.level(), 0);
assert_eq!(node.is_syncable(), false);
assert!(!node.is_syncable());
// hard to know the exact age of the root, but we know the max.
let max_dur = SystemTime::now().duration_since(now).unwrap();
let max_age = max_dur.as_secs() as i64 * 1000 + i64::from(max_dur.subsec_millis());
assert!(node.age <= max_age);

// We should have changes.
assert_eq!(db_has_changes(&syncer).unwrap(), true);
assert!(db_has_changes(&syncer).unwrap());
Ok(())
}

Expand Down Expand Up @@ -2909,11 +2909,11 @@ mod tests {
let info_for_a = get_raw_bookmark(&writer, &guid_for_a)
.expect("Should fetch info for A")
.unwrap();
assert_eq!(info_for_a.sync_change_counter, 2);
assert_eq!(info_for_a._sync_change_counter, 2);
let info_for_unfiled = get_raw_bookmark(&writer, &BookmarkRootGuid::Unfiled.as_guid())
.expect("Should fetch info for unfiled")
.unwrap();
assert_eq!(info_for_unfiled.sync_change_counter, 2);
assert_eq!(info_for_unfiled._sync_change_counter, 2);

engine
.sync_finished(
Expand All @@ -2929,11 +2929,11 @@ mod tests {
let info_for_a = get_raw_bookmark(&writer, &guid_for_a)
.expect("Should fetch info for A")
.unwrap();
assert_eq!(info_for_a.sync_change_counter, 0);
assert_eq!(info_for_a._sync_change_counter, 0);
let info_for_unfiled = get_raw_bookmark(&writer, &BookmarkRootGuid::Unfiled.as_guid())
.expect("Should fetch info for unfiled")
.unwrap();
assert_eq!(info_for_unfiled.sync_change_counter, 0);
assert_eq!(info_for_unfiled._sync_change_counter, 0);

let mut tags_for_c = tags::get_tags_for_url(
&writer,
Expand Down Expand Up @@ -4322,8 +4322,8 @@ mod tests {
let bm = get_raw_bookmark(&writer, &guid.into())
.expect("must work")
.expect("must exist");
assert_eq!(bm.sync_status, SyncStatus::Normal, "{}", guid);
assert_eq!(bm.sync_change_counter, 0, "{}", guid);
assert_eq!(bm._sync_status, SyncStatus::Normal, "{}", guid);
assert_eq!(bm._sync_change_counter, 0, "{}", guid);
}
// And bookmarkEEEE wasn't on the server, so should be outgoing, and
// it's parent too.
Expand Down
44 changes: 22 additions & 22 deletions components/places/src/storage/bookmarks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1119,11 +1119,11 @@ pub fn insert_tree(db: &PlacesDb, tree: &FolderNode) -> Result<()> {
#[derive(Debug)]
struct FetchedTreeRow {
level: u32,
id: RowId,
_id: RowId,
guid: SyncGuid,
// parent and parent_guid are Option<> only to handle the root - we would
// assert but they aren't currently used.
parent: Option<RowId>,
_parent: Option<RowId>,
parent_guid: Option<SyncGuid>,
node_type: BookmarkType,
position: u32,
Expand All @@ -1138,9 +1138,9 @@ impl FetchedTreeRow {
let url = row.get::<_, Option<String>>("url")?;
Ok(Self {
level: row.get("level")?,
id: row.get::<_, RowId>("id")?,
_id: row.get::<_, RowId>("id")?,
guid: row.get::<_, String>("guid")?.into(),
parent: row.get::<_, Option<RowId>>("parent")?,
_parent: row.get::<_, Option<RowId>>("parent")?,
parent_guid: row
.get::<_, Option<String>>("parentGuid")?
.map(SyncGuid::from),
Expand All @@ -1167,8 +1167,8 @@ fn inflate(
.and_then(|guid| pseudo_tree.remove(guid))
{
parent.children = children;
for mut child in &mut parent.children {
inflate(&mut child, pseudo_tree);
for child in &mut parent.children {
inflate(child, pseudo_tree);
}
}
}
Expand Down Expand Up @@ -1370,10 +1370,10 @@ pub(crate) struct RawBookmark {
pub date_added: Timestamp,
pub date_modified: Timestamp,
pub guid: SyncGuid,
pub sync_status: SyncStatus,
pub sync_change_counter: u32,
pub _sync_status: SyncStatus,
pub _sync_change_counter: u32,
pub child_count: u32,
pub grandparent_id: Option<RowId>,
pub _grandparent_id: Option<RowId>,
}

impl RawBookmark {
Expand All @@ -1396,12 +1396,12 @@ impl RawBookmark {
date_added: row.get("dateAdded")?,
date_modified: row.get("lastModified")?,
guid: row.get::<_, String>("guid")?.into(),
sync_status: SyncStatus::from_u8(row.get::<_, u8>("_syncStatus")?),
sync_change_counter: row
_sync_status: SyncStatus::from_u8(row.get::<_, u8>("_syncStatus")?),
_sync_change_counter: row
.get::<_, Option<u32>>("syncChangeCounter")?
.unwrap_or_default(),
child_count: row.get("_childCount")?,
grandparent_id: row.get("_grandparentId")?,
_grandparent_id: row.get("_grandparentId")?,
})
}
}
Expand Down Expand Up @@ -1674,14 +1674,14 @@ mod tests {
assert_eq!(rb.position, 0);
assert_eq!(rb.title, Some("the title".into()));
assert_eq!(rb.url, Some(url));
assert_eq!(rb.sync_status, SyncStatus::New);
assert_eq!(rb.sync_change_counter, 1);
assert_eq!(rb._sync_status, SyncStatus::New);
assert_eq!(rb._sync_change_counter, 1);
assert!(global_change_tracker.changed());
assert_eq!(rb.child_count, 0);

let unfiled = get_raw_bookmark(&conn, &BookmarkRootGuid::Unfiled.as_guid())?
.expect("should get unfiled");
assert_eq!(unfiled.sync_change_counter, 1);
assert_eq!(unfiled._sync_change_counter, 1);

Ok(())
}
Expand Down Expand Up @@ -2096,7 +2096,7 @@ mod tests {
)?;
let bm = get_raw_bookmark(&conn, &guid)?.expect("should exist");
assert_eq!(bm.title, Some("the bookmark".to_string()));
assert_eq!(bm.sync_change_counter, 0);
assert_eq!(bm._sync_change_counter, 0);

// Update to the same value is still not a change.
update_bookmark(
Expand All @@ -2110,7 +2110,7 @@ mod tests {
)?;
let bm = get_raw_bookmark(&conn, &guid)?.expect("should exist");
assert_eq!(bm.title, Some("the bookmark".to_string()));
assert_eq!(bm.sync_change_counter, 0);
assert_eq!(bm._sync_change_counter, 0);

// Update to an empty string sets it to null
update_bookmark(
Expand All @@ -2124,7 +2124,7 @@ mod tests {
)?;
let bm = get_raw_bookmark(&conn, &guid)?.expect("should exist");
assert_eq!(bm.title, None);
assert_eq!(bm.sync_change_counter, 1);
assert_eq!(bm._sync_change_counter, 1);

Ok(())
}
Expand Down Expand Up @@ -2569,15 +2569,15 @@ mod tests {

let bmk = get_raw_bookmark(&conn, &"bookmarkAAAA".into())?
.expect("Should fetch A before resetting");
assert_eq!(bmk.sync_change_counter, 0);
assert_eq!(bmk.sync_status, SyncStatus::Normal);
assert_eq!(bmk._sync_change_counter, 0);
assert_eq!(bmk._sync_status, SyncStatus::Normal);

bookmark_sync::reset(&conn, &EngineSyncAssociation::Disconnected)?;

let bmk = get_raw_bookmark(&conn, &"bookmarkAAAA".into())?
.expect("Should fetch A after resetting");
assert_eq!(bmk.sync_change_counter, 1);
assert_eq!(bmk.sync_status, SyncStatus::New);
assert_eq!(bmk._sync_change_counter, 1);
assert_eq!(bmk._sync_status, SyncStatus::New);

// Ensure we reset Sync metadata, too.
let global = get_meta::<SyncGuid>(&conn, GLOBAL_SYNCID_META_KEY)?;
Expand Down
4 changes: 2 additions & 2 deletions components/support/nimbus-fml/src/parser.rs
Original file line number Diff line number Diff line change
Expand Up @@ -463,7 +463,7 @@ pub struct Parser {
enums: Vec<EnumDef>,
objects: Vec<ObjectDef>,
features: Vec<FeatureDef>,
channels: Vec<String>,
_channels: Vec<String>,
}

impl Parser {
Expand All @@ -480,7 +480,7 @@ impl Parser {
enums,
objects,
features,
channels: manifest.channels,
_channels: manifest.channels,
})
}

Expand Down
3 changes: 3 additions & 0 deletions components/support/rc_crypto/nss/src/pk11/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,9 @@ unsafe fn pk11_destroy_context_true(context: *mut nss_sys::PK11Context) {

// Trait for types that have PCKS#11 attributes that are readable. See
// https://searchfox.org/mozilla-central/rev/8ed8474757695cdae047150a0eaf94a5f1c96dbe/security/nss/lib/pk11wrap/pk11pub.h#842-864
/// # Safety
/// Unsafe since it needs to call [`nss_sys::PK11_ReadRawAttribute`] which is
/// a C NSS function, and thus inherently unsafe to call
pub(crate) unsafe trait Pkcs11Object: ScopedPtr {
const PK11_OBJECT_TYPE: nss_sys::PK11ObjectType;
fn read_raw_attribute(
Expand Down
Loading