simplify write_tag_to and fix bug writing chunk offsets
saecki committed Apr 24, 2021
1 parent 2cd1f59 commit dda96f7
Showing 3 changed files with 48 additions and 65 deletions.
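
A note on the fix (my reading of the diff, not part of the commit message): the bug in the title is in write_tag_to, where the old loop matched on the idents of the sample table (stbl) atoms themselves rather than on their children, so the stco/co64 arms never ran and chunk offsets were left stale after the file was resized. Chunk offsets are absolute file positions, so every entry has to be shifted by the net size change. A minimal sketch of that fix-up; shift_offsets is a hypothetical helper mirroring the arithmetic in the diff below:

    fn shift_offsets(offsets: &mut [u32], len_diff: i64) {
        // stco entries are absolute file offsets; inserting or removing
        // `len_diff` bytes before the media data invalidates every entry.
        for off in offsets.iter_mut() {
            *off = (*off as i64 + len_diff) as u32;
        }
    }

    fn main() {
        let mut offsets = vec![1024_u32, 2048];
        shift_offsets(&mut offsets, 16); // metadata grew by 16 bytes
        assert_eq!(offsets, [1040, 2064]);
    }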
src/atom/ident.rs (2 changes: 1 addition & 1 deletion)
@@ -195,7 +195,7 @@ impl Ident for Fourcc {

 impl fmt::Debug for Fourcc {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Ident({})", self.0.iter().map(|b| char::from(*b)).collect::<String>())
+        write!(f, "Fourcc({})", self.0.iter().map(|b| char::from(*b)).collect::<String>())
     }
 }

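The rename makes the Debug output match the type name. A self-contained stand-in showing the changed output (the struct definition is assumed from the formatter above, not copied from the crate):

    use std::fmt;

    // Stand-in newtype mirroring the formatter in the diff.
    struct Fourcc([u8; 4]);

    impl fmt::Debug for Fourcc {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "Fourcc({})", self.0.iter().map(|b| char::from(*b)).collect::<String>())
        }
    }

    fn main() {
        println!("{:?}", Fourcc(*b"ilst")); // prints "Fourcc(ilst)", previously "Ident(ilst)"
    }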
src/atom/info.rs (18 changes: 8 additions & 10 deletions)
@@ -258,27 +258,26 @@ pub(super) struct ChunkOffsetInfo {

 impl ChunkOffsetInfo {
     pub(super) fn parse(reader: &mut (impl Read + Seek), len: u64) -> crate::Result<Self> {
-        let pos = reader.seek(SeekFrom::Current(0))? + 8;
-
         let (version, _) = parse_ext_head(reader)?;
 
         match version {
             0 => {
                 let entries = data::read_u32(reader)?;
-                if 8 + 4 * entries as u64 > len {
+                if 8 + 4 * entries as u64 != len {
                     return Err(crate::Error::new(
                         crate::ErrorKind::Parsing,
-                        "Sample table chunk offset (stco) size too big".to_owned(),
+                        "Sample table chunk offset (stco) offset table size doesn't match atom length".to_owned(),
                     ));
                 }
 
+                let table_pos = reader.seek(SeekFrom::Current(0))?;
                 let mut offsets = Vec::with_capacity(entries as usize);
                 for _ in 0..entries {
                     let offset = data::read_u32(reader)?;
                     offsets.push(offset);
                 }
 
-                Ok(Self { table_pos: pos, offsets })
+                Ok(Self { table_pos, offsets })
             }
             _ => Err(crate::Error::new(
                 crate::ErrorKind::UnknownVersion(version),
@@ -297,27 +296,26 @@ pub(super) struct ChunkOffsetInfo64 {

 impl ChunkOffsetInfo64 {
     pub(super) fn parse(reader: &mut (impl Read + Seek), len: u64) -> crate::Result<Self> {
-        let pos = reader.seek(SeekFrom::Current(0))? + 8;
-
         let (version, _) = parse_ext_head(reader)?;
 
         match version {
             0 => {
                 let entries = data::read_u32(reader)?;
-                if 8 + 8 * entries as u64 > len {
+                if 8 + 8 * entries as u64 != len {
                     return Err(crate::Error::new(
                         crate::ErrorKind::Parsing,
-                        "Sample table chunk offset (stco) size too big".to_owned(),
+                        "Sample table chunk offset 64 (co64) offset table size doesn't match atom length".to_owned(),
                     ));
                 }
 
+                let table_pos = reader.seek(SeekFrom::Current(0))?;
                 let mut offsets = Vec::with_capacity(entries as usize);
                 for _ in 0..entries {
                     let offset = data::read_u64(reader)?;
                     offsets.push(offset);
                 }
 
-                Ok(Self { table_pos: pos, offsets })
+                Ok(Self { table_pos, offsets })
             }
             _ => Err(crate::Error::new(
                 crate::ErrorKind::UnknownVersion(version),
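Both parsers also tighten the length check from `>` to `!=`: a version-0 chunk offset atom has a fixed layout of 4 bytes version/flags, a 4-byte entry count, and one fixed-width offset per entry, so any mismatch, not just an overrun, indicates a malformed atom. The old check silently accepted atoms whose table was shorter than the declared length. A quick check of the arithmetic (expected_len is an illustrative helper, not crate code):

    // Content length of a version-0 chunk offset atom:
    // 4 (version + flags) + 4 (entry count) + entry_size * entries.
    fn expected_len(entries: u32, entry_size: u64) -> u64 {
        8 + entry_size * entries as u64
    }

    fn main() {
        assert_eq!(expected_len(3, 4), 20); // stco: three u32 offsets
        assert_eq!(expected_len(3, 8), 32); // co64: three u64 offsets
    }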
src/atom/mod.rs (93 changes: 39 additions & 54 deletions)
@@ -685,42 +685,28 @@ pub(crate) fn write_tag_to(file: &File, atoms: &[AtomData]) -> crate::Result<()>
     let hdlr = meta.and_then(|a| a.atoms.iter().find(|a| a.ident == HANDLER_REFERENCE));
     let ilst = meta.and_then(|a| a.atoms.iter().find(|a| a.ident == ITEM_LIST));
 
-    let metadata_len: u64 = atoms.iter().map(|a| a.len()).sum();
-
     let mut update_atoms = Vec::new();
     let mut new_atoms = Vec::new();
     let mut new_atoms_start = 0;
     let mut moved_data_start = 0;
     let mut len_diff = 0;
-    let metadata_start;
-    let write_metadta_separately;
 
-    if hdlr.is_none() {
+    if let None = hdlr {
         new_atoms.push(template::meta_handler_reference_atom());
     }
-    match ilst {
-        Some(ilst) => {
-            update_atoms.push(ilst);
-            new_atoms_start = ilst.content_pos() + metadata_len;
-            moved_data_start = ilst.end();
-            let new_atom_len: u64 = new_atoms.iter().map(|a| a.len()).sum();
-            len_diff = (new_atom_len + metadata_len) as i64 - ilst.content_len() as i64;
-            metadata_start = ilst.content_pos();
-            write_metadta_separately = true;
-        }
-        None => {
-            new_atoms.push(Atom::new(ITEM_LIST, 0, Content::AtomDataRef(atoms)));
-            metadata_start = 0;
-            write_metadta_separately = false;
-        }
+    if let Some(ilst) = ilst {
+        new_atoms_start = ilst.pos;
+        moved_data_start = ilst.end();
+        len_diff -= ilst.len as i64;
     }
+    new_atoms.push(Atom::new(ITEM_LIST, 0, Content::AtomDataRef(atoms)));
 
     match meta {
         Some(meta) => {
             update_atoms.push(meta);
             if let None = ilst {
                 new_atoms_start = meta.end();
                 moved_data_start = meta.end();
-                len_diff = new_atoms.iter().map(|a| a.len()).sum::<u64>() as i64;
             }
         }
         None => {
@@ -733,16 +719,15 @@ pub(crate) fn write_tag_to(file: &File, atoms: &[AtomData]) -> crate::Result<()>
             if let None = meta {
                 new_atoms_start = udta.end();
                 moved_data_start = udta.end();
-                len_diff = new_atoms.iter().map(|a| a.len()).sum::<u64>() as i64;
             }
         }
         None => {
             new_atoms = vec![Atom::new(USER_DATA, 0, Content::Atoms(new_atoms))];
             new_atoms_start = moov.end();
             moved_data_start = moov.end();
-            len_diff = new_atoms.iter().map(|a| a.len()).sum::<u64>() as i64;
         }
     }
+    len_diff += new_atoms.iter().map(|a| a.len()).sum::<u64>() as i64;
     update_atoms.push(moov);
 
     // reading moved data
@@ -764,36 +749,37 @@
         .filter_map(|a| a.atoms.iter().find(|a| a.ident == MEDIA_INFORMATION))
         .filter_map(|a| a.atoms.iter().find(|a| a.ident == SAMPLE_TABLE));
 
-    for a in stbl_atoms {
-        match a.ident {
-            SAMPLE_TABLE_CHUNK_OFFSET => {
-                reader.seek(SeekFrom::Start(a.content_pos()))?;
-                let chunk_offset = ChunkOffsetInfo::parse(&mut reader, a.content_len())?;
-
-                writer.seek(SeekFrom::Start(chunk_offset.table_pos))?;
-                for co in chunk_offset.offsets.iter() {
-                    let new_offset = (*co as i64 + len_diff) as u32;
-                    writer.write_all(&u32::to_be_bytes(new_offset))?;
-                }
-            }
-            SAMPLE_TABLE_CHUNK_OFFSET_64 => {
-                reader.seek(SeekFrom::Start(a.content_pos()))?;
-                let chunk_offset = ChunkOffsetInfo64::parse(&mut reader, a.content_len())?;
-
-                writer.seek(SeekFrom::Start(chunk_offset.table_pos))?;
-                for co in chunk_offset.offsets.iter() {
-                    let new_offset = (*co as i64 + len_diff) as u64;
-                    writer.write_all(&u64::to_be_bytes(new_offset))?;
-                }
-            }
-            _ => (),
-        }
-    }
+    for stbl in stbl_atoms {
+        for a in stbl.atoms.iter() {
+            match a.ident {
+                SAMPLE_TABLE_CHUNK_OFFSET => {
+                    reader.seek(SeekFrom::Start(a.content_pos()))?;
+                    let chunk_offset = ChunkOffsetInfo::parse(&mut reader, a.content_len())?;
+
+                    writer.seek(SeekFrom::Start(chunk_offset.table_pos))?;
+                    for co in chunk_offset.offsets.iter() {
+                        let new_offset = (*co as i64 + len_diff) as u32;
+                        writer.write_all(&u32::to_be_bytes(new_offset))?;
+                    }
+                    writer.flush()?;
+                }
+                SAMPLE_TABLE_CHUNK_OFFSET_64 => {
+                    reader.seek(SeekFrom::Start(a.content_pos()))?;
+                    let chunk_offset = ChunkOffsetInfo64::parse(&mut reader, a.content_len())?;
+
+                    writer.seek(SeekFrom::Start(chunk_offset.table_pos))?;
+                    for co in chunk_offset.offsets.iter() {
+                        let new_offset = (*co as i64 + len_diff) as u64;
+                        writer.write_all(&u64::to_be_bytes(new_offset))?;
+                    }
+                    writer.flush()?;
+                }
+                _ => (),
+            }
+        }
+    }
 
-    // adjusting the file length
-    file.set_len((old_file_len as i64 + len_diff) as u64)?;
-
     // update existing ilst hierarchy atom lengths
     for a in update_atoms.iter().rev() {
         let new_len = a.len as i64 + len_diff;
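This hunk contains the chunk offset bug fix itself: stbl_atoms yields the stbl atoms, so the old match compared a stbl ident against SAMPLE_TABLE_CHUNK_OFFSET and no arm could ever run; the new loop descends into the children, where stco/co64 actually live. A reduced model of the difference (plain strings stand in for the crate's ident types):

    struct Node {
        ident: &'static str,
        children: Vec<Node>,
    }

    fn main() {
        let stbl = Node {
            ident: "stbl",
            children: vec![Node { ident: "stco", children: vec![] }],
        };
        // old: matched the parent's ident, which is always "stbl"
        assert_ne!(stbl.ident, "stco");
        // new: inspects the children, where the chunk offset table lives
        assert!(stbl.children.iter().any(|a| a.ident == "stco"));
    }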
@@ -808,21 +794,19 @@ pub(crate) fn write_tag_to(file: &File, atoms: &[AtomData]) -> crate::Result<()>
         }
     }
 
+    // adjusting the file length
+    file.set_len((old_file_len as i64 + len_diff) as u64)?;
+
     // write missing ilst hierarchy and metadata
     if !new_atoms.is_empty() {
         writer.seek(SeekFrom::Start(new_atoms_start))?;
         for a in new_atoms.iter() {
             a.write_to(&mut writer)?;
         }
     }
-    if write_metadta_separately {
-        writer.seek(SeekFrom::Start(metadata_start))?;
-        for a in atoms {
-            a.write_to(&mut writer)?;
-        }
-    }
 
     // writing moved data
     writer.seek(SeekFrom::Start((moved_data_start as i64 + len_diff) as u64))?;
     writer.write_all(&moved_data)?;
     writer.flush()?;

Expand All @@ -841,9 +825,10 @@ pub(crate) fn dump_tag_to(writer: &mut impl Write, atoms: &[AtomData]) -> crate:
     #[rustfmt::skip]
     let moov = Atom::new(MOVIE, 0, Content::atom(
         Atom::new(USER_DATA, 0, Content::atom(
-            Atom::new(METADATA, 4, Content::atom(
+            Atom::new(METADATA, 4, Content::Atoms(vec![
+                template::meta_handler_reference_atom(),
                 Atom::new(ITEM_LIST, 0, Content::Atoms(atoms))
-            )),
+            ])),
         )),
     ));
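
The dump template previously nested a single ilst under meta; switching to Content::Atoms(vec![...]) makes room for the handler reference next to it. The resulting hierarchy, as far as this template shows:

    moov
    └── udta
        └── meta
            ├── hdlr (handler reference)
            └── ilst (item list carrying the tag atoms)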
