From ad5ad981ba16f066c8a6dc506e907135c7a09735 Mon Sep 17 00:00:00 2001 From: Avi Deitcher Date: Wed, 10 Oct 2018 18:27:24 +0300 Subject: [PATCH] Initial cut of iso9660 --- .gitignore | 1 + .travis.yml | 6 +- Gopkg.lock | 21 + Gopkg.toml | 30 ++ Makefile | 2 +- README.md | 18 +- disk/disk.go | 7 + diskfs_test.go | 2 +- filesystem/filesystem.go | 2 + filesystem/iso9660/common_internal_test.go | 46 ++ filesystem/iso9660/directory.go | 44 ++ filesystem/iso9660/directory_internal_test.go | 113 ++++ filesystem/iso9660/directoryentry.go | 400 ++++++++++++++ .../iso9660/directoryentry_internal_test.go | 489 +++++++++++++++++ filesystem/iso9660/doc.go | 5 + filesystem/iso9660/file.go | 77 +++ filesystem/iso9660/file_test.go | 40 ++ filesystem/iso9660/finalize.go | 493 ++++++++++++++++++ filesystem/iso9660/finalize_internal_test.go | 201 +++++++ filesystem/iso9660/finalize_test.go | 140 +++++ filesystem/iso9660/iso9660.go | 405 ++++++++++++++ filesystem/iso9660/iso9660_internal_test.go | 66 +++ filesystem/iso9660/iso9660_test.go | 457 ++++++++++++++++ filesystem/iso9660/pathtable.go | 159 ++++++ filesystem/iso9660/pathtable_internal_test.go | 105 ++++ filesystem/iso9660/testdata/.gitignore | 1 + filesystem/iso9660/testdata/README.md | 54 ++ filesystem/iso9660/testdata/buildtestiso.sh | 13 + filesystem/iso9660/testdata/file.iso | Bin 0 -> 11018240 bytes filesystem/iso9660/testdata/isoutil.go | 288 ++++++++++ filesystem/iso9660/testdata/volrecords.iso | Bin 0 -> 8192 bytes filesystem/iso9660/util.go | 74 +++ filesystem/iso9660/volume_descriptor.go | 478 +++++++++++++++++ .../volume_descriptor_internal_test.go | 281 ++++++++++ testhelper/fileimpl.go | 7 + util/file.go | 1 + util/version.go | 5 + 37 files changed, 4523 insertions(+), 8 deletions(-) create mode 100644 .gitignore create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 filesystem/iso9660/common_internal_test.go create mode 100644 filesystem/iso9660/directory.go create mode 100644 
filesystem/iso9660/directory_internal_test.go create mode 100644 filesystem/iso9660/directoryentry.go create mode 100644 filesystem/iso9660/directoryentry_internal_test.go create mode 100644 filesystem/iso9660/doc.go create mode 100644 filesystem/iso9660/file.go create mode 100644 filesystem/iso9660/file_test.go create mode 100644 filesystem/iso9660/finalize.go create mode 100644 filesystem/iso9660/finalize_internal_test.go create mode 100644 filesystem/iso9660/finalize_test.go create mode 100644 filesystem/iso9660/iso9660.go create mode 100644 filesystem/iso9660/iso9660_internal_test.go create mode 100644 filesystem/iso9660/iso9660_test.go create mode 100644 filesystem/iso9660/pathtable.go create mode 100644 filesystem/iso9660/pathtable_internal_test.go create mode 100644 filesystem/iso9660/testdata/.gitignore create mode 100644 filesystem/iso9660/testdata/README.md create mode 100644 filesystem/iso9660/testdata/buildtestiso.sh create mode 100644 filesystem/iso9660/testdata/file.iso create mode 100644 filesystem/iso9660/testdata/isoutil.go create mode 100644 filesystem/iso9660/testdata/volrecords.iso create mode 100644 filesystem/iso9660/util.go create mode 100644 filesystem/iso9660/volume_descriptor.go create mode 100644 filesystem/iso9660/volume_descriptor_internal_test.go create mode 100644 util/version.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..48b8bf90 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +vendor/ diff --git a/.travis.yml b/.travis.yml index e1a91e5e..89ff00fc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,10 +6,12 @@ services: language: go go: - - 1.9.x + - 1.10.3 - master +before_install: +- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + script: - make image - make test - diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 00000000..bd07c792 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep 
ensure'. + + +[[projects]] + name = "github.com/satori/go.uuid" + packages = ["."] + revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "4497e2df6f9e69048a54498c7affbbec3294ad47" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "db113688e0bd2396a6e881d2f286fd0bc116c5f741633154206a95b45990d4c3" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 00000000..4d232e18 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/satori/go.uuid" + version = "1.2.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/sys" diff --git a/Makefile b/Makefile index 4424acb0..5241a43a 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ image: docker build -t $(IMAGE) testhelper/docker dependencies: - @go get -t ./... + @dep ensure unit_test: dependencies @go test ./... diff --git a/README.md b/README.md index c42f3a12..42ad99e0 100644 --- a/README.md +++ b/README.md @@ -37,8 +37,8 @@ You do *not* need a partitioned disk to work with a filesystem; filesystems can ### Working With a Disk Before you can do anything with a disk - partitions or filesystems - you need to access it. 
-* If you have an existing disk, you `Open()` it -* If you are creating a new one, in general just for disk image files, you `Create()` it +* If you have an existing disk or image file, you `Open()` it +* If you are creating a new one, usually just disk image files, you `Create()` it Once you have a `Disk`, you can work with partitions or filesystems in it. @@ -57,7 +57,7 @@ Once you have a valid disk, and optionally partition, you can access filesystems * `CreateFilesystem()` - create a filesystem in an individual partition or the entire disk * `GetFilesystem()` - access an existing filesystem in a partition or the entire disk -As of this writing, supported filesystems include `FAT32`. +As of this writing, supported filesystems include `FAT32` and `ISO9660` (a.k.a. `.iso`). With a filesystem in hand, you can create, access and modify directories and files. @@ -73,6 +73,14 @@ With a `File` in hand, you then can: * `Read(b []byte)` from the file * `Seek(offset int64, whence int)` to set the next read or write to an offset in the file +### Read-Only Filesystems +Some filesystem types are intended to be created once, after which they are read-only, for example `ISO9660`/`.iso` and `squashfs`. + +`godiskfs` recognizes read-only filesystems and limits working with them to the following: + +* You can `GetFilesystem()` a read-only filesystem and do all read activities, but cannot write to them. Any attempt to `Mkdir()` or `OpenFile()` in write/append/create modes or `Write()` to the file will result in an error. +* You can `CreateFilesystem()` a read-only filesystem and write anything to it that you want. It will do all of its work in a "scratch" area, or temporary "workspace" directory on your local filesystem. When you are ready to complete it, you call `Finalize()`, after which it becomes read-only. If you forget to `Finalize()` it, you get... nothing. The `Finalize()` function exists only on read-only filesystems. 
+ ### Examples The following example will create a fully bootable EFI disk image. It assumes you have a bootable EFI file (any modern Linux kernel compiled with `CONFIG_EFI_STUB=y` will work) available. @@ -139,5 +147,7 @@ Future plans are to add the following: * embed boot code in `mbr` e.g. `altmbr.bin` (no need for `gpt` since an ESP with `/EFI/BOOT/BOOT.EFI` will boot) * `ext4` filesystem -* `iso9660` / `Rock Ridge` filesystem +* `Rock Ridge` and `Joliet` extensions to `iso9660` +* `El Torito` booting extension to `iso9660` * `qcow` disk format +* `squashfs` filesystem diff --git a/disk/disk.go b/disk/disk.go index 57e5b6e8..721deabd 100644 --- a/disk/disk.go +++ b/disk/disk.go @@ -12,6 +12,7 @@ import ( "github.com/deitch/diskfs/filesystem" "github.com/deitch/diskfs/filesystem/fat32" + "github.com/deitch/diskfs/filesystem/iso9660" "github.com/deitch/diskfs/partition" ) @@ -127,6 +128,8 @@ func (d *Disk) CreateFilesystem(partition int, fstype filesystem.Type) (filesyst switch fstype { case filesystem.TypeFat32: return fat32.Create(d.File, size, start, d.LogicalBlocksize) + case filesystem.TypeISO9660: + return iso9660.Create(d.File, size, start, d.LogicalBlocksize) default: return nil, errors.New("Unknown filesystem type requested") } @@ -169,5 +172,9 @@ func (d *Disk) GetFilesystem(partition int) (filesystem.FileSystem, error) { if err == nil { return fat32FS, nil } + iso9660FS, err := iso9660.Read(d.File, size, start, d.LogicalBlocksize) + if err == nil { + return iso9660FS, nil + } return nil, fmt.Errorf("Unknown filesystem on partition %d", partition) } diff --git a/diskfs_test.go b/diskfs_test.go index 0b96a0d5..6ef2216d 100644 --- a/diskfs_test.go +++ b/diskfs_test.go @@ -61,7 +61,7 @@ func TestOpen(t *testing.T) { err error }{ {"", nil, fmt.Errorf("must pass device name")}, - {"/tmp/foo/bar/232323/23/2322/disk.img", nil, fmt.Errorf("provided device %s does not exist", "/tmp/foo/bar/232323/23/2322/disk.img")}, + {"/tmp/foo/bar/232323/23/2322/disk.img", 
nil, fmt.Errorf("")}, {path, &disk.Disk{Type: disk.File, LogicalBlocksize: 512, PhysicalBlocksize: 512, Size: size}, nil}, } diff --git a/filesystem/filesystem.go b/filesystem/filesystem.go index d1110896..9decfc77 100644 --- a/filesystem/filesystem.go +++ b/filesystem/filesystem.go @@ -20,4 +20,6 @@ type Type int const ( // TypeFat32 is a FAT32 compatible filesystem TypeFat32 Type = iota + // TypeISO9660 is an iso filesystem + TypeISO9660 ) diff --git a/filesystem/iso9660/common_internal_test.go b/filesystem/iso9660/common_internal_test.go new file mode 100644 index 00000000..d2654aa8 --- /dev/null +++ b/filesystem/iso9660/common_internal_test.go @@ -0,0 +1,46 @@ +package iso9660 + +import ( + "os" + "testing" + "time" +) + +const ( + ISO9660File = "./testdata/file.iso" + ISO9660Size = 11018240 +) + +func GetTestFile(t *testing.T) (*File, string) { + // we use the entry for FILENA01.;1 , which should have the content "filename_01" (without the quotes) + // see ./testdata/README.md + // + // entry: + // {recordSize:0x7a, extAttrSize:0x0, location:0x1422, size:0xb, creation:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}, isHidden:false, isSubdirectory:false, isAssociated:false, hasExtendedAttrs:false, hasOwnerGroupPermissions:false, hasMoreEntries:false, volumeSequence:0x0, filename:"FILENA01.;1"}, + // FileSystem implements the FileSystem interface + file, err := os.Open(ISO9660File) + if err != nil { + t.Errorf("Could not read ISO9660 test file %s: %v", ISO9660File, err) + } + fs := &FileSystem{ + workspace: "", + size: ISO9660Size, + start: 0, + file: file, + blocksize: 2048, + } + de := &directoryEntry{ + extAttrSize: 0, + location: 0x1422, + size: 0xb, + creation: time.Now(), + filesystem: fs, + filename: "FILENA01.;1", + } + return &File{ + directoryEntry: de, + isReadWrite: false, + isAppend: false, + offset: 0, + }, "filename_1\n" +} diff --git a/filesystem/iso9660/directory.go b/filesystem/iso9660/directory.go new file mode 100644 index 
00000000..b17efca1 --- /dev/null +++ b/filesystem/iso9660/directory.go @@ -0,0 +1,44 @@ +package iso9660 + +// Directory represents a single directory in a FAT32 filesystem +type Directory struct { + directoryEntry + entries []*directoryEntry +} + +// dirEntriesFromBytes loads the directory entries from the raw bytes +func (d *Directory) entriesFromBytes(b []byte, f *FileSystem) error { + entries, err := parseDirEntries(b, f) + if err != nil { + return err + } + d.entries = entries + return nil +} + +// entriesToBytes convert our entries to raw bytes +func (d *Directory) entriesToBytes() ([]byte, error) { + b := make([]byte, 0) + blocksize := int(d.filesystem.blocksize) + for _, de := range d.entries { + b2, err := de.toBytes() + if err != nil { + return nil, err + } + // a directory entry cannot cross a block boundary + // so if adding this puts us past it, then pad it + // but only if we are not already exactly at the boundary + newlength := len(b) + len(b2) + left := blocksize - len(b)%blocksize + if left != 0 && newlength/blocksize > len(b)/blocksize { + b = append(b, make([]byte, left)...) + } + b = append(b, b2...) + } + // in the end, must pad to exact blocks + left := blocksize - len(b)%blocksize + if left > 0 { + b = append(b, make([]byte, left)...) 
+ } + return b, nil +} diff --git a/filesystem/iso9660/directory_internal_test.go b/filesystem/iso9660/directory_internal_test.go new file mode 100644 index 00000000..f4585a09 --- /dev/null +++ b/filesystem/iso9660/directory_internal_test.go @@ -0,0 +1,113 @@ +package iso9660 + +import ( + "testing" +) + +// TestDirectoryEntriesFromBytes largely a duplicate of TestdirectoryEntryParseDirEntries +// it just loads it into the Directory structure +func TestDirectoryEntriesFromBytes(t *testing.T) { + fs := &FileSystem{blocksize: 2048} + validDe, _, _, b, err := getValidDirectoryEntries(fs) + if err != nil { + t.Fatal(err) + } + + d := &Directory{} + err = d.entriesFromBytes(b, fs) + switch { + case err != nil: + t.Errorf("Unexpected non-nil error: %v", err) + case d.entries == nil: + t.Errorf("unexpected nil entries") + case len(d.entries) != len(validDe): + t.Errorf("mismatched entries length actual %d vs expected %d", len(d.entries), len(validDe)) + default: + // run through them and see that they match + for i, de := range d.entries { + if !compareDirectoryEntriesIgnoreDates(de, validDe[i]) { + t.Errorf("%d: directoryEntry mismatch, actual then valid:", i) + t.Logf("%#v\n", de) + t.Logf("%#v\n", validDe[i]) + } + } + } + +} + +func TestDirectoryEntriesToBytes(t *testing.T) { + blocksize := 2048 + validDe, _, _, b, err := getValidDirectoryEntries(nil) + if err != nil { + t.Fatal(err) + } + d := &Directory{ + entries: validDe, + directoryEntry: directoryEntry{ + filesystem: &FileSystem{ + blocksize: int64(blocksize), + }, + }, + } + output, err := d.entriesToBytes() + // null the date bytes out + if err != nil { + t.Fatalf("unexpected non-nil error: %v", err) + } + // cannot directly compare the bytes as of yet, since the original contains all sorts of system area stuff + output = clearDatesDirectoryBytes(output, blocksize) + output = clearSuspDirectoryBytes(output, blocksize) + b = clearDatesDirectoryBytes(b, blocksize) + b = clearSuspDirectoryBytes(b, blocksize) + 
switch { + case output == nil: + t.Errorf("unexpected nil bytes") + case len(output) == 0: + t.Errorf("unexpected 0 length byte slice") + case len(output) != len(b): + t.Errorf("mismatched byte slice length actual %d, expected %d", len(output), len(b)) + case len(output)%blocksize != 0: + t.Errorf("output size was %d which is not a perfect multiple of %d", len(output), blocksize) + } +} + +func clearDatesDirectoryBytes(b []byte, blocksize int) []byte { + if b == nil { + return b + } + nullBytes := make([]byte, 7, 7) + for i := 0; i < len(b); { + // get the length of the current record + dirlen := int(b[i]) + if dirlen == 0 { + i += blocksize - blocksize%i + continue + } + copy(b[i+18:i+18+7], nullBytes) + i += dirlen + } + return b +} +func clearSuspDirectoryBytes(b []byte, blocksize int) []byte { + if b == nil { + return b + } + for i := 0; i < len(b); { + // get the length of the current record + dirlen := int(b[i+0]) + namelen := int(b[i+32]) + if dirlen == 0 { + i += blocksize - blocksize%i + continue + } + if namelen%2 == 0 { + namelen++ + } + nullByteStart := 33 + namelen + nullByteLen := dirlen - nullByteStart + nullBytes := make([]byte, nullByteLen, nullByteLen) + copy(b[i+nullByteStart:i+nullByteStart+nullByteLen], nullBytes) + i += dirlen + } + return b +} diff --git a/filesystem/iso9660/directoryentry.go b/filesystem/iso9660/directoryentry.go new file mode 100644 index 00000000..fd7abc20 --- /dev/null +++ b/filesystem/iso9660/directoryentry.go @@ -0,0 +1,400 @@ +package iso9660 + +import ( + "encoding/binary" + "fmt" + "os" + "path" + "regexp" + "strings" + "time" +) + +const ( + minDirectoryEntrySize uint8 = 34 // min size is all the required fields (33 bytes) plus 1 byte for the filename +) + +// directoryEntry is a single directory entry +// also fulfills os.FileInfo +// Name() string // base name of the file +// Size() int64 // length in bytes for regular files; system-dependent for others +// Mode() FileMode // file mode bits +// ModTime() time.Time 
// modification time +// IsDir() bool // abbreviation for Mode().IsDir() +// Sys() interface{} // underlying data source (can return nil) +type directoryEntry struct { + extAttrSize uint8 + location uint32 + size uint32 + creation time.Time + isHidden bool + isSubdirectory bool + isAssociated bool + hasExtendedAttrs bool + hasOwnerGroupPermissions bool + hasMoreEntries bool + isSelf bool + isParent bool + volumeSequence uint16 + filesystem *FileSystem + filename string +} + +func (de *directoryEntry) toBytes() ([]byte, error) { + // size includes the ";1" at the end as two bytes if a filename + var namelen byte + switch { + case de.isSelf: + namelen = 1 + case de.isParent: + namelen = 1 + default: + namelen = uint8(len(de.filename)) + } + // if even, we add one byte of padding to always end on an even byte + if namelen%2 == 0 { + namelen++ + } + + recordSize := 33 + namelen + + b := make([]byte, recordSize, recordSize) + + b[0] = recordSize + b[1] = de.extAttrSize + binary.LittleEndian.PutUint32(b[2:6], de.location) + binary.BigEndian.PutUint32(b[6:10], de.location) + binary.LittleEndian.PutUint32(b[10:14], de.size) + binary.BigEndian.PutUint32(b[14:18], de.size) + copy(b[18:25], timeToBytes(de.creation)) + + // set the flags + var flagByte byte = 0x00 + if de.isHidden { + flagByte = flagByte | 0x01 + } + if de.isSubdirectory { + flagByte = flagByte | 0x02 + } + if de.isAssociated { + flagByte = flagByte | 0x04 + } + if de.hasExtendedAttrs { + flagByte = flagByte | 0x08 + } + if de.hasOwnerGroupPermissions { + flagByte = flagByte | 0x10 + } + if de.hasMoreEntries { + flagByte = flagByte | 0x80 + } + b[25] = flagByte + // volume sequence number - uint16 in both endian + binary.LittleEndian.PutUint16(b[28:30], de.volumeSequence) + binary.BigEndian.PutUint16(b[30:32], de.volumeSequence) + + b[32] = namelen + + // save the filename + var filenameBytes []byte + var err error + switch { + case de.isSelf: + filenameBytes = []byte{0x00} + case de.isParent: + filenameBytes 
= []byte{0x01} + default: + // first validate the filename + err = validateFilename(de.filename, de.isSubdirectory) + if err != nil { + nametype := "filename" + if de.isSubdirectory { + nametype = "directory" + } + return nil, fmt.Errorf("Invalid %s %s: %v", nametype, de.filename, err) + } + filenameBytes, err = stringToASCIIBytes(de.filename) + if err != nil { + return nil, fmt.Errorf("Error converting filename to bytes: %v", err) + } + } + + // copy it over + copy(b[33:], filenameBytes) + + return b, nil +} + +func dirEntryFromBytes(b []byte) (*directoryEntry, error) { + // has to be at least 34 bytes + if len(b) < int(minDirectoryEntrySize) { + return nil, fmt.Errorf("Cannot read directoryEntry from %d bytes, fewer than minimum of %d bytes", len(b), minDirectoryEntrySize) + } + recordSize := b[0] + // what if it is not the right size? + if len(b) != int(recordSize) { + return nil, fmt.Errorf("directoryEntry should be size %d bytes according to first byte, but have %d bytes", recordSize, len(b)) + } + extAttrSize := b[1] + location := binary.LittleEndian.Uint32(b[2:6]) + size := binary.LittleEndian.Uint32(b[10:14]) + creation := bytesToTime(b[18:25]) + + // get the flags + flagByte := b[25] + isHidden := flagByte&0x01 == 0x01 + isSubdirectory := flagByte&0x02 == 0x02 + isAssociated := flagByte&0x04 == 0x04 + hasExtendedAttrs := flagByte&0x08 == 0x08 + hasOwnerGroupPermissions := flagByte&0x10 == 0x10 + hasMoreEntries := flagByte&0x80 == 0x80 + + volumeSequence := binary.LittleEndian.Uint16(b[28:30]) + + // size includes the ";1" at the end as two bytes and any padding + namelen := b[32] + + // get the filename itself + nameBytes := b[33 : 33+namelen] + if namelen > 1 && nameBytes[namelen-1] == 0x00 { + nameBytes = nameBytes[:namelen-1] + } + var filename string + var isSelf, isParent bool + switch { + case namelen == 1 && nameBytes[0] == 0x00: + filename = "" + isSelf = true + case namelen == 1 && nameBytes[0] == 0x01: + filename = "" + isParent = true + default: 
+ filename = string(nameBytes) + } + + return &directoryEntry{ + extAttrSize: extAttrSize, + location: location, + size: size, + creation: creation, + isHidden: isHidden, + isSubdirectory: isSubdirectory, + isAssociated: isAssociated, + hasExtendedAttrs: hasExtendedAttrs, + hasOwnerGroupPermissions: hasOwnerGroupPermissions, + hasMoreEntries: hasMoreEntries, + isSelf: isSelf, + isParent: isParent, + volumeSequence: volumeSequence, + filename: filename, + }, nil +} + +// parseDirEntries takes all of the bytes in a special file (i.e. a directory) +// and gets all of the DirectoryEntry for that directory +// this is, essentially, the equivalent of `ls -l` or if you prefer `dir` +func parseDirEntries(b []byte, f *FileSystem) ([]*directoryEntry, error) { + dirEntries := make([]*directoryEntry, 0, 20) + count := 0 + for i := 0; i < len(b); count++ { + // empty entry means nothing more to read - this might not actually be accurate, but work with it for now + entryLen := int(b[i+0]) + if entryLen == 0 { + i += (int(f.blocksize) - i%int(f.blocksize)) + continue + } + // get the bytes + de, err := dirEntryFromBytes(b[i+0 : i+entryLen]) + if err != nil { + return nil, fmt.Errorf("Invalid directory entry %d at byte %d: %v", count, i, err) + } + de.filesystem = f + dirEntries = append(dirEntries, de) + i += entryLen + } + return dirEntries, nil +} + +// get the location of a particular path relative to this directory +func (de *directoryEntry) getLocation(p string) (uint32, uint32, error) { + // break path down into parts and levels + parts, err := splitPath(p) + if err != nil { + return 0, 0, fmt.Errorf("Could not parse path: %v", err) + } + var location, size uint32 + if len(parts) == 0 { + location = de.location + size = de.size + } else { + current := parts[0] + // read the directory bytes + dirb := make([]byte, de.size, de.size) + n, err := de.filesystem.file.ReadAt(dirb, int64(de.location)*de.filesystem.blocksize) + if err != nil { + return 0, 0, fmt.Errorf("Could not 
read directory: %v", err) + } + if n != len(dirb) { + return 0, 0, fmt.Errorf("Read %d bytes instead of expected %d", n, len(dirb)) + } + // parse those entries + dirEntries, err := parseDirEntries(dirb, de.filesystem) + if err != nil { + return 0, 0, fmt.Errorf("Could not parse directory: %v", err) + } + // find the entry among the children that has the desired name + for _, entry := range dirEntries { + if entry.filename == current { + if len(parts) > 1 { + // just dig down further + location, size, err = entry.getLocation(path.Join(parts[1:]...)) + if err != nil { + return 0, 0, fmt.Errorf("Could not get location: %v", err) + } + } else { + // this is the final one, we found it, keep it + location = entry.location + size = entry.size + } + break + } + } + } + + return location, size, nil +} + +// Name() string // base name of the file +func (de *directoryEntry) Name() string { + name := de.filename + // filenames should have the ';1' stripped off, as well as the leading or trailing '.' 
+ if !de.IsDir() { + name = strings.TrimSuffix(name, ";1") + name = strings.TrimSuffix(name, ".") + name = strings.TrimPrefix(name, ".") + } + return name +} + +// Size() int64 // length in bytes for regular files; system-dependent for others +func (de *directoryEntry) Size() int64 { + return int64(de.size) +} + +// Mode() FileMode // file mode bits +func (de *directoryEntry) Mode() os.FileMode { + return 0755 +} + +// ModTime() time.Time // modification time +func (de *directoryEntry) ModTime() time.Time { + return de.creation +} + +// IsDir() bool // abbreviation for Mode().IsDir() +func (de *directoryEntry) IsDir() bool { + return de.isSubdirectory +} + +// Sys() interface{} // underlying data source (can return nil) +func (de *directoryEntry) Sys() interface{} { + return nil +} + +// utilities + +func bytesToTime(b []byte) time.Time { + year := int(b[0]) + month := time.Month(b[1]) + date := int(b[2]) + hour := int(b[3]) + minute := int(b[4]) + second := int(b[5]) + offset := int(int8(b[6])) + location := time.FixedZone("iso", offset*15*60) + return time.Date(year+1900, month, date, hour, minute, second, 0, location) +} + +func timeToBytes(t time.Time) []byte { + year := t.Year() + month := t.Month() + date := t.Day() + second := t.Second() + minute := t.Minute() + hour := t.Hour() + _, offset := t.Zone() + b := make([]byte, 7, 7) + b[0] = byte(year - 1900) + b[1] = byte(month) + b[2] = byte(date) + b[3] = byte(hour) + b[4] = byte(minute) + b[5] = byte(second) + b[6] = byte(int8(offset / 60 / 15)) + return b +} + +// convert a string to ascii bytes, but only accept valid d-characters +func validateFilename(s string, isDir bool) error { + var err error + // return nil, fmt.Errorf("Invalid d-character") + if isDir { + // directory only allowed up to 8 characters of A-Z,0-9,_ + re := regexp.MustCompile("^[A-Z0-9_]{1,30}$") + if !re.MatchString(s) { + err = fmt.Errorf("Directory name must be of up to 30 characters from A-Z0-9_") + } + } else { + // filename only 
allowed up to 8 characters of A-Z,0-9,_, plus an optional '.' plus up to 3 characters of A-Z,0-9,_, plus must have ";1" + re := regexp.MustCompile("^[A-Z0-9_]+(.[A-Z0-9_]*)?;1$") + switch { + case !re.MatchString(s): + err = fmt.Errorf("File name must be of characters from A-Z0-9_, followed by an optional '.' and an extension of the same characters") + case len(strings.Replace(s, ".", "", -1)) > 30: + err = fmt.Errorf("File name must be at most 30 characters, not including the separator '.'") + } + } + return err +} + +// convert a string to a byte array, if all characters are valid ascii +func stringToASCIIBytes(s string) ([]byte, error) { + length := len(s) + b := make([]byte, length, length) + // convert the name into 11 bytes + r := []rune(s) + // take the first 8 characters + for i := 0; i < length; i++ { + val := int(r[i]) + // we only can handle values less than max byte = 255 + if val > 255 { + return nil, fmt.Errorf("Non-ASCII character in name: %s", s) + } + b[i] = byte(val) + } + return b, nil +} + +// converts a string into upper-case with only valid characters +func uCaseValid(name string) string { + // easiest way to do this is to go through the name one char at a time + r := []rune(name) + r2 := make([]rune, 0, len(r)) + for _, val := range r { + switch { + case (0x30 <= val && val <= 0x39) || (0x41 <= val && val <= 0x5a) || (val == 0x7e): + // naturally valid characters + r2 = append(r2, val) + case (0x61 <= val && val <= 0x7a): + // lower-case characters should be upper-cased + r2 = append(r2, val-32) + case val == ' ' || val == '.': + // remove spaces and periods + continue + default: + // replace the rest with _ + r2 = append(r2, '_') + } + } + return string(r2) +} diff --git a/filesystem/iso9660/directoryentry_internal_test.go b/filesystem/iso9660/directoryentry_internal_test.go new file mode 100644 index 00000000..2da9fcc3 --- /dev/null +++ b/filesystem/iso9660/directoryentry_internal_test.go @@ -0,0 +1,489 @@ +package iso9660 + +import ( + 
"bytes" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" +) + +var ( + timeBytesTests = []struct { + b []byte + rfc string + }{ + // see reference at https://wiki.osdev.org/ISO_9660#Directories + {[]byte{80, 1, 2, 14, 35, 36, 0}, "1980-01-02T14:35:36+00:00"}, + {[]byte{95, 11, 25, 0, 16, 7, 8}, "1995-11-25T00:16:07+02:00"}, + {[]byte{101, 6, 30, 12, 0, 0, 0xe6}, "2001-06-30T12:00:00-06:30"}, + } +) + +func compareDirectoryEntriesIgnoreDates(a, b *directoryEntry) bool { + now := time.Now() + // copy values so we do not mess up the originals + c := &directoryEntry{} + d := &directoryEntry{} + *c = *a + *d = *b + + // unify fields we let be equal + c.creation = now + d.creation = now + return *c == *d +} +func directoryEntryBytesNullDate(a []byte) []byte { + now := make([]byte, 7, 7) + a1 := make([]byte, len(a)) + copy(a1[18:18+7], now) + return a1 +} + +func getValidDirectoryEntries(f *FileSystem) ([]*directoryEntry, []int, [][]byte, []byte, error) { + blocksize := 2048 + rootSector := 18 + // read correct bytes off of disk + input, err := ioutil.ReadFile(ISO9660File) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("Error reading data from iso9660 test fixture %s: %v", ISO9660File, err) + } + + // start of root directory in file.iso - sector 18 + // sector 0-15 - system area + // sector 16 - Primary Volume Descriptor + // sector 17 - Volume Descriptor Set Terimnator + // sector 18 - / (root) directory + // sector 19 - + // sector 20 - /abc directory + // sector 21 - /bar directory + // sector 22 - /foo directory + // sector 23 - /foo directory + // sector 24 - /foo directory + // sector 25 - /foo directory + // sector 26 - /foo directory + // sector 27 - L path table + // sector 28 - M path table + // sector 33-2592 - /ABC/LARGEFILE + // sector 2593-5152 - /BAR/LARGEFILE + // sector 5153 - /FOO/FILENA01 + // .. 
+ // sector 5228 - /FOO/FILENA75 + // sector 5229 - /README.MD + start := rootSector * blocksize // start of root directory in file.iso + + // one block, since we know it is just one block + allBytes := input[start : start+blocksize] + + b := make([][]byte, 0, 8) + + t1 := time.Now() + sizes := []int{0x84, 0x60, 0x6a, 0x6a, 0x6a, 0x78} + entries := []*directoryEntry{ + &directoryEntry{ + extAttrSize: 0, + location: 0x12, + size: 0x800, + creation: t1, + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "", + isSelf: true, + filesystem: f, + }, + &directoryEntry{ + extAttrSize: 0, + location: 0x12, + size: 0x800, + creation: t1, + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "", + isParent: true, + filesystem: f, + }, + &directoryEntry{ + extAttrSize: 0, + location: 0x14, + size: 0x800, + creation: t1, + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "ABC", + filesystem: f, + }, + &directoryEntry{ + extAttrSize: 0, + location: 0x15, + size: 0x800, + creation: t1, + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "BAR", + filesystem: f, + }, + &directoryEntry{ + extAttrSize: 0, + location: 0x16, + size: 0x2800, + creation: t1, + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "FOO", + filesystem: f, + }, + &directoryEntry{ + extAttrSize: 0, + location: 0x146d, + size: 0x3ea, + creation: t1, + 
isHidden: false, + isSubdirectory: false, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: "README.MD;1", + filesystem: f, + }, + } + + read := 0 + for _ = range entries { + recordSize := int(allBytes[read]) + // do we have a 0 point? if so, move ahead until we pass it at the end of the block + if recordSize == 0x00 { + read += (blocksize - read%blocksize) + } + b = append(b, allBytes[read:read+recordSize]) + read += recordSize + } + + return entries, sizes, b, allBytes, nil +} + +func getValidDirectoryEntriesExtended(fs *FileSystem) ([]*directoryEntry, [][]byte, []byte, error) { + // these are taken from the file ./testdata/fat32.img, see ./testdata/README.md + blocksize := 2048 + fooSector := 22 + t1, _ := time.Parse(time.RFC3339, "2017-11-26T07:53:16Z") + sizes := []int{0x60, 0x60, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, + 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, + 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, + 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, 0x7a, + 0x7a, 0x7a, 0x7a, 0x7a, 0x7a} + entries := []*directoryEntry{ + // recordSize, extAttrSize,location,size,creation,isHidden,isSubdirectory,isAssociated,hasExtendedAttrs,hasOwnerGroupPermissions,hasMoreEntries,volumeSequence,filename + {extAttrSize: 0x0, location: 0x16, size: 0x2800, creation: t1, isHidden: false, isSubdirectory: true, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "", isSelf: true}, + {extAttrSize: 0x0, location: 0x12, size: 0x800, creation: t1, isHidden: false, isSubdirectory: true, isAssociated: false, hasExtendedAttrs: false, 
hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "", isParent: true}, + {extAttrSize: 0x0, location: 0x1421, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA00.;1"}, + {extAttrSize: 0x0, location: 0x1422, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA01.;1"}, + {extAttrSize: 0x0, location: 0x1423, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA02.;1"}, + {extAttrSize: 0x0, location: 0x1424, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA03.;1"}, + {extAttrSize: 0x0, location: 0x1425, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA04.;1"}, + {extAttrSize: 0x0, location: 0x1426, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA05.;1"}, + {extAttrSize: 0x0, location: 0x1427, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA06.;1"}, + {extAttrSize: 0x0, location: 0x1428, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: 
false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA07.;1"}, + {extAttrSize: 0x0, location: 0x1429, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA08.;1"}, + {extAttrSize: 0x0, location: 0x142a, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA09.;1"}, + {extAttrSize: 0x0, location: 0x142b, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA10.;1"}, + {extAttrSize: 0x0, location: 0x142c, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA11.;1"}, + {extAttrSize: 0x0, location: 0x142d, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA12.;1"}, + {extAttrSize: 0x0, location: 0x142e, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA13.;1"}, + {extAttrSize: 0x0, location: 0x142f, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA14.;1"}, + {extAttrSize: 0x0, location: 0x1430, size: 0xc, creation: t1, isHidden: false, 
isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA15.;1"}, + {extAttrSize: 0x0, location: 0x1431, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA16.;1"}, + {extAttrSize: 0x0, location: 0x1432, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA17.;1"}, + {extAttrSize: 0x0, location: 0x1433, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA18.;1"}, + {extAttrSize: 0x0, location: 0x1434, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA19.;1"}, + {extAttrSize: 0x0, location: 0x1435, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA20.;1"}, + {extAttrSize: 0x0, location: 0x1436, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA21.;1"}, + {extAttrSize: 0x0, location: 0x1437, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA22.;1"}, + {extAttrSize: 0x0, location: 0x1438, size: 0xb, 
creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA23.;1"}, + {extAttrSize: 0x0, location: 0x1439, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA24.;1"}, + {extAttrSize: 0x0, location: 0x143a, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA25.;1"}, + {extAttrSize: 0x0, location: 0x143b, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA26.;1"}, + {extAttrSize: 0x0, location: 0x143c, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA27.;1"}, + {extAttrSize: 0x0, location: 0x143d, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA28.;1"}, + {extAttrSize: 0x0, location: 0x143e, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA29.;1"}, + {extAttrSize: 0x0, location: 0x143f, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA30.;1"}, + {extAttrSize: 0x0, 
location: 0x1440, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA31.;1"}, + {extAttrSize: 0x0, location: 0x1441, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA32.;1"}, + {extAttrSize: 0x0, location: 0x1442, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA33.;1"}, + {extAttrSize: 0x0, location: 0x1443, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA34.;1"}, + {extAttrSize: 0x0, location: 0x1444, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA35.;1"}, + {extAttrSize: 0x0, location: 0x1445, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA36.;1"}, + {extAttrSize: 0x0, location: 0x1446, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA37.;1"}, + {extAttrSize: 0x0, location: 0x1447, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: 
"FILENA38.;1"}, + {extAttrSize: 0x0, location: 0x1448, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA39.;1"}, + {extAttrSize: 0x0, location: 0x1449, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA40.;1"}, + {extAttrSize: 0x0, location: 0x144a, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA41.;1"}, + {extAttrSize: 0x0, location: 0x144b, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA42.;1"}, + {extAttrSize: 0x0, location: 0x144c, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA43.;1"}, + {extAttrSize: 0x0, location: 0x144d, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA44.;1"}, + {extAttrSize: 0x0, location: 0x144e, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA45.;1"}, + {extAttrSize: 0x0, location: 0x144f, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, 
volumeSequence: 0x1, filename: "FILENA46.;1"}, + {extAttrSize: 0x0, location: 0x1450, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA47.;1"}, + {extAttrSize: 0x0, location: 0x1451, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA48.;1"}, + {extAttrSize: 0x0, location: 0x1452, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA49.;1"}, + {extAttrSize: 0x0, location: 0x1453, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA50.;1"}, + {extAttrSize: 0x0, location: 0x1454, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA51.;1"}, + {extAttrSize: 0x0, location: 0x1455, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA52.;1"}, + {extAttrSize: 0x0, location: 0x1456, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA53.;1"}, + {extAttrSize: 0x0, location: 0x1457, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: 
false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA54.;1"}, + {extAttrSize: 0x0, location: 0x1458, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA55.;1"}, + {extAttrSize: 0x0, location: 0x1459, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA56.;1"}, + {extAttrSize: 0x0, location: 0x145a, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA57.;1"}, + {extAttrSize: 0x0, location: 0x145b, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA58.;1"}, + {extAttrSize: 0x0, location: 0x145c, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA59.;1"}, + {extAttrSize: 0x0, location: 0x145d, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA60.;1"}, + {extAttrSize: 0x0, location: 0x145e, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA61.;1"}, + {extAttrSize: 0x0, location: 0x145f, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, 
hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA62.;1"}, + {extAttrSize: 0x0, location: 0x1460, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA63.;1"}, + {extAttrSize: 0x0, location: 0x1461, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA64.;1"}, + {extAttrSize: 0x0, location: 0x1462, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA65.;1"}, + {extAttrSize: 0x0, location: 0x1463, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA66.;1"}, + {extAttrSize: 0x0, location: 0x1464, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA67.;1"}, + {extAttrSize: 0x0, location: 0x1465, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA68.;1"}, + {extAttrSize: 0x0, location: 0x1466, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA69.;1"}, + {extAttrSize: 0x0, location: 0x1467, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, 
hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA70.;1"}, + {extAttrSize: 0x0, location: 0x1468, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA71.;1"}, + {extAttrSize: 0x0, location: 0x1469, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA72.;1"}, + {extAttrSize: 0x0, location: 0x146a, size: 0xc, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA73.;1"}, + {extAttrSize: 0x0, location: 0x146b, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA74.;1"}, + {extAttrSize: 0x0, location: 0x146c, size: 0xb, creation: t1, isHidden: false, isSubdirectory: false, isAssociated: false, hasExtendedAttrs: false, hasOwnerGroupPermissions: false, hasMoreEntries: false, volumeSequence: 0x1, filename: "FILENA75.;1"}, + } + + for _, e := range entries { + e.filesystem = fs + } + // read correct bytes off of disk + input, err := ioutil.ReadFile(ISO9660File) + if err != nil { + return nil, nil, nil, fmt.Errorf("Error reading data from iso9660 test fixture %s: %v", ISO9660File, err) + } + + start := fooSector * blocksize // start of /foo directory in file.iso + + // five blocks, since we know it is five blocks + allBytes := input[start : start+5*blocksize] + + b := make([][]byte, 0, len(entries)) + read := 0 + for i := range entries { + recordSize := sizes[i] + // do we have a 0 point? 
if so, move ahead until we pass it at the end of the block + if allBytes[read] == 0x00 { + read += (blocksize - read%blocksize) + } + b = append(b, allBytes[read:read+recordSize]) + read += recordSize + } + return entries, b, allBytes, nil +} + +func TestBytesToTime(t *testing.T) { + for _, tt := range timeBytesTests { + output := bytesToTime(tt.b) + expected, err := time.Parse(time.RFC3339, tt.rfc) + if err != nil { + t.Fatalf("Error parsing expected date: %v", err) + } + if !expected.Equal(output) { + t.Errorf("bytesToTime(%d) expected output %v, actual %v", tt.b, expected, output) + } + } +} + +func TestTimeToBytes(t *testing.T) { + for _, tt := range timeBytesTests { + input, err := time.Parse(time.RFC3339, tt.rfc) + if err != nil { + t.Fatalf("Error parsing input date: %v", err) + } + b := timeToBytes(input) + if bytes.Compare(b, tt.b) != 0 { + t.Errorf("timeToBytes(%v) expected output %x, actual %x", tt.rfc, tt.b, b) + } + } + +} + +func TestDirectoryEntryStringToASCIIBytes(t *testing.T) { + tests := []struct { + input string + output []byte + err error + }{ + {"abc", []byte{0x61, 0x62, 0x63}, nil}, + {"abcdefg", []byte{0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67}, nil}, + {"abcdef\u2318", nil, fmt.Errorf("Non-ASCII character in name: %s", "abcdef\u2318")}, + } + for _, tt := range tests { + output, err := stringToASCIIBytes(tt.input) + if bytes.Compare(output, tt.output) != 0 { + t.Errorf("stringToASCIIBytes(%s) expected output %v, actual %v", tt.input, tt.output, output) + } + if (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())) { + t.Errorf("mismatched err expected, actual: %v, %v", tt.err, err) + } + } + +} + +func TestDirectoryEntryUCaseValid(t *testing.T) { + tests := []struct { + input string + output string + }{ + {"abc", "ABC"}, + {"ABC", "ABC"}, + {"aBC", "ABC"}, + {"a15D", "A15D"}, + {"A BC", "ABC"}, + {"A..-a*)82y12112bb", "A_A__82Y12112BB"}, + } + for _, 
tt := range tests { + output := uCaseValid(tt.input) + if output != tt.output { + t.Errorf("uCaseValid(%s) expected %s actual %s", tt.input, tt.output, output) + } + } +} + +func TestDirectoryEntryParseDirEntries(t *testing.T) { + fs := &FileSystem{blocksize: 2048} + validDe, _, _, b, err := getValidDirectoryEntries(fs) + if err != nil { + t.Fatal(err) + } + tests := []struct { + de []*directoryEntry + b []byte + err error + }{ + {validDe, b, nil}, + } + + for _, tt := range tests { + output, err := parseDirEntries(tt.b, fs) + switch { + case (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): + t.Log(err) + t.Log(tt.err) + t.Errorf("mismatched err expected, actual: %v, %v", tt.err, err) + case (output == nil && tt.de != nil) || (tt.de == nil && output != nil): + t.Errorf("parseDirEntries() DirectoryEntry mismatched nil actual, expected %v %v", output, tt.de) + case len(output) != len(tt.de): + t.Errorf("parseDirEntries() DirectoryEntry mismatched length actual, expected %d %d", len(output), len(tt.de)) + default: + for i, de := range output { + if !compareDirectoryEntriesIgnoreDates(de, tt.de[i]) { + t.Errorf("%d: parseDirEntries() DirectoryEntry mismatch, actual then valid:", i) + t.Logf("%#v\n", de) + t.Logf("%#v\n", tt.de[i]) + } + } + } + } + +} + +func TestDirectoryEntryToBytes(t *testing.T) { + validDe, sizes, validBytes, _, err := getValidDirectoryEntries(nil) + if err != nil { + t.Fatal(err) + } + for i, de := range validDe { + b, err := de.toBytes() + switch { + case err != nil: + t.Errorf("Error converting directory entry to bytes: %v", err) + t.Logf("%v", de) + case int(b[0]) != len(b): + t.Errorf("Reported size as %d but had %d bytes", b[0], len(b)) + default: + // set the byte sizes to what we expect from disk + b[0] = uint8(sizes[i]) + if bytes.Compare(directoryEntryBytesNullDate(b), directoryEntryBytesNullDate(validBytes[i][:len(b)])) != 0 { + 
t.Errorf("Mismatched bytes %s, actual vs expected", de.filename) + t.Log(b) + t.Log(validBytes[i]) + } + } + } +} + +func TestDirectoryEntryGetLocation(t *testing.T) { + // directoryEntryGetLocation(p string) (uint32, uint32, error) { + tests := []struct { + input string + output uint32 + err error + }{ + {"/", 18, nil}, + {"/ABC", 20, nil}, + {"/FOO", 22, nil}, + {"/NOTHERE", 0, nil}, + } + + f, err := os.Open(ISO9660File) + if err != nil { + t.Fatalf("Could not open iso testing file %s: %v", ISO9660File, err) + } + // the root directory entry + root := &directoryEntry{ + extAttrSize: 0, + location: 0x12, + size: 0x800, + creation: time.Now(), + isHidden: false, + isSubdirectory: true, + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: false, + volumeSequence: 1, + filename: string(0x00), + filesystem: &FileSystem{blocksize: 2048, file: f}, + } + + for _, tt := range tests { + // root directory entry needs a filesystem or this will error out + output, _, err := root.getLocation(tt.input) + if output != tt.output { + t.Errorf("directoryEntry.getLocation(%s) expected output %d, actual %d", tt.input, tt.output, output) + } + if (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())) { + t.Errorf("mismatched err expected, actual: %v, %v", tt.err, err) + } + } +} diff --git a/filesystem/iso9660/doc.go b/filesystem/iso9660/doc.go new file mode 100644 index 00000000..638f4513 --- /dev/null +++ b/filesystem/iso9660/doc.go @@ -0,0 +1,5 @@ +// Package iso9660 provides utilities to interact with, manipulate and create an iso9660 filesystem on a block device or +// a disk image. 
+// +// +package iso9660 diff --git a/filesystem/iso9660/file.go b/filesystem/iso9660/file.go new file mode 100644 index 00000000..323e566f --- /dev/null +++ b/filesystem/iso9660/file.go @@ -0,0 +1,77 @@ +package iso9660 + +import ( + "fmt" + "io" +) + +// File represents a single file in an iso9660 filesystem +// it is NOT used when working in a workspace, where we just use the underlying OS +type File struct { + *directoryEntry + isReadWrite bool + isAppend bool + offset int64 +} + +// Read reads up to len(b) bytes from the File. +// It returns the number of bytes read and any error encountered. +// At end of file, Read returns 0, io.EOF +// reads from the last known offset in the file from last read or write +// use Seek() to set at a particular point +func (fl *File) Read(b []byte) (int, error) { + // we have the DirectoryEntry, so we can get the starting location and size + // since iso9660 files are contiguous, we only need the starting location and size + // to get the entire file + fs := fl.filesystem + size := int(fl.size) - int(fl.offset) + location := int(fl.location) + maxRead := size + file := fs.file + + // if there is nothing left to read, just return EOF + if size <= 0 { + return 0, io.EOF + } + + // we stop when we hit the lesser of + // 1- len(b) + // 2- file end + if len(b) < maxRead { + maxRead = len(b) + } + + // just read the requested number of bytes and change our offset + file.ReadAt(b[0:maxRead], int64(location)*fs.blocksize+int64(fl.offset)) + + fl.offset = fl.offset + int64(maxRead) + var retErr error + if fl.offset >= int64(size) { + retErr = io.EOF + } + return maxRead, retErr +} + +// Write writes len(b) bytes to the File. 
+// you cannot write to an iso, so this returns an error +func (fl *File) Write(p []byte) (int, error) { + return 0, fmt.Errorf("Cannot write to a read-only iso filesystem") +} + +// Seek set the offset to a particular point in the file +func (fl *File) Seek(offset int64, whence int) (int64, error) { + newOffset := int64(0) + switch whence { + case io.SeekStart: + newOffset = offset + case io.SeekEnd: + newOffset = int64(fl.size) + offset + case io.SeekCurrent: + newOffset = fl.offset + offset + } + if newOffset < 0 { + return fl.offset, fmt.Errorf("Cannot set offset %d before start of file", offset) + } + fl.offset = newOffset + return fl.offset, nil +} diff --git a/filesystem/iso9660/file_test.go b/filesystem/iso9660/file_test.go new file mode 100644 index 00000000..2f8da880 --- /dev/null +++ b/filesystem/iso9660/file_test.go @@ -0,0 +1,40 @@ +package iso9660_test + +import ( + "io" + "testing" + + "github.com/deitch/diskfs/filesystem/iso9660" +) + +func TestFileRead(t *testing.T) { + // pretty simple: never should be able to write as it is a read-only filesystem + // we use + f, content := iso9660.GetTestFile(t) + + b := make([]byte, 20, 20) + read, err := f.Read(b) + if read != 0 && err != io.EOF { + t.Errorf("received unexpected error when reading: %v", err) + } + if read != len(content) { + t.Errorf("read %d bytes instead of expected %d", read, len(content)) + } + bString := string(b[:read]) + if bString != content { + t.Errorf("Mismatched content:\nActual: '%s'\nExpected: '%s'", bString, content) + } +} + +func TestFileWrite(t *testing.T) { + // pretty simple: never should be able to write as it is a read-only filesystem + f := &iso9660.File{} + b := make([]byte, 8, 8) + written, err := f.Write(b) + if err == nil { + t.Errorf("received no error when should have been prevented from writing") + } + if written != 0 { + t.Errorf("wrote %d bytes instead of expected %d", written, 0) + } +} diff --git a/filesystem/iso9660/finalize.go 
b/filesystem/iso9660/finalize.go new file mode 100644 index 00000000..acb4e90f --- /dev/null +++ b/filesystem/iso9660/finalize.go @@ -0,0 +1,493 @@ +package iso9660 + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/deitch/diskfs/util" +) + +// finalizeFileInfo is a file info useful for finalization +// fulfills os.FileInfo +// Name() string // base name of the file +// Size() int64 // length in bytes for regular files; system-dependent for others +// Mode() FileMode // file mode bits +// ModTime() time.Time // modification time +// IsDir() bool // abbreviation for Mode().IsDir() +// Sys() interface{} // underlying data source (can return nil) +type finalizeFileInfo struct { + path string + shortname string + extension string + location uint32 + blocks uint32 + recordSize uint8 + depth int + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool + parent *finalizeFileInfo + children []*finalizeFileInfo +} + +func (fi *finalizeFileInfo) Name() string { + // we are using plain iso9660 (without extensions), so just shortname possibly with extension + ret := fi.shortname + if !fi.isDir { + ret = fmt.Sprintf("%s.%s;1", fi.shortname, fi.extension) + } + // shortname already is ucased + return ret +} +func (fi *finalizeFileInfo) Size() int64 { + return fi.size +} +func (fi *finalizeFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi *finalizeFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi *finalizeFileInfo) IsDir() bool { + return fi.isDir +} +func (fi *finalizeFileInfo) Sys() interface{} { + return nil +} + +func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bool) *directoryEntry { + return &directoryEntry{ + extAttrSize: 0, + location: fi.location, + size: uint32(fi.Size()), + creation: fi.ModTime(), + isHidden: false, + isSubdirectory: fi.IsDir(), + isAssociated: false, + hasExtendedAttrs: false, + hasOwnerGroupPermissions: false, + hasMoreEntries: 
false, + isSelf: isSelf, + isParent: isParent, + volumeSequence: 1, + filesystem: fs, + filename: fi.Name(), + } +} + +// sort all of the directory children recursively - this is for ordering into blocks +func (fi *finalizeFileInfo) collapseAndSortChildren(depth int) ([]*finalizeFileInfo, []*finalizeFileInfo) { + dirs := make([]*finalizeFileInfo, 0) + files := make([]*finalizeFileInfo, 0) + // first extract all of the directories + for _, e := range fi.children { + if e.IsDir() { + dirs = append(dirs, e) + e.parent = fi + e.depth = depth + 1 + } else { + files = append(files, e) + } + } + + // next sort them + sort.Slice(dirs, func(i, j int) bool { + // just sort by filename; as good as anything else + return dirs[i].Name() < dirs[j].Name() + }) + sort.Slice(files, func(i, j int) bool { + // just sort by filename; as good as anything else + return files[i].Name() < files[j].Name() + }) + // finally add in the children going down + finalDirs := make([]*finalizeFileInfo, 0) + finalFiles := files + for _, e := range dirs { + finalDirs = append(finalDirs, e) + // now get any children + d, f := e.collapseAndSortChildren(depth + 1) + finalDirs = append(finalDirs, d...) + finalFiles = append(finalFiles, f...) + } + return finalDirs, finalFiles +} + +func finalizeFileInfoNames(fi []*finalizeFileInfo) []string { + ret := make([]string, len(fi)) + for i, v := range fi { + ret[i] = v.name + } + return ret +} + +// Finalize finalize a read-only filesystem by writing it out to a read-only format +func (fs *FileSystem) Finalize() error { + if fs.workspace == "" { + return fmt.Errorf("Cannot finalize an already finalized filesystem") + } + + /* + There is nothing in the iso9660 spec about the order of directories and files, + other than that they must be accessible in the location specified in directory entry and/or path table + However, most implementations seem to it as follows: + - each directory follows its parent + - data (i.e. 
file) sectors in each directory are immediately after its directory and immediately before the next sibling directory to its parent + + to keep it simple, we will follow what xorriso/mkisofs on linux does, in the following order: + - volume descriptor set, beginning at sector 16 + - root directory entry + - all other directory entries, sorted alphabetically, depth first + - L path table + - M path table + - data sectors for files, sorted alphabetically, matching order of directories + + this is where we build our filesystem + 1- blank out sectors 0-15 for system use + 2- skip sectors 16-17 for PVD and terminator (fill later) + 3- calculate how many sectors required for root directory + 4- calculate each child directory, working our way down, including number of sectors and location + 5- write path tables (L & M) + 6- write files for root directory + 7- write root directory entry into its sector (18) + 8- repeat steps 6&7 for all other directories + 9- write PVD + 10- write volume descriptor set terminator + */ + + f := fs.file + blocksize := int(fs.blocksize) + + // 1- blank out sectors 0-15 + b := make([]byte, 15*fs.blocksize) + n, err := f.WriteAt(b, 0) + if err != nil { + return fmt.Errorf("Could not write blank system area: %v", err) + } + if n != len(b) { + return fmt.Errorf("Only wrote %d bytes instead of expected %d to system area", n, len(b)) + } + + // 2- skip sectors 16-17 for PVD and terminator (fill later) + + // 3- build out file tree + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("Could not get pwd: %v", err) + } + os.Chdir(fs.Workspace()) + fileList := make([]*finalizeFileInfo, 0, 20) + dirList := make(map[string]*finalizeFileInfo) + var entry *finalizeFileInfo + filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error { + isRoot := fp == "." 
+ parts := strings.SplitN(fi.Name(), ".", 2) + shortname := parts[0] + extension := "" + if len(parts) > 1 { + extension = parts[1] + } + // shortname and extension must be upper-case + shortname = strings.ToUpper(shortname) + extension = strings.ToUpper(extension) + + name := fi.Name() + if isRoot { + name = string([]byte{0x00}) + shortname = name + } + entry = &finalizeFileInfo{path: fp, name: name, isDir: fi.IsDir(), modTime: fi.ModTime(), mode: fi.Mode(), size: fi.Size(), shortname: shortname} + + // we will have to save it as its parent + parentDir := filepath.Dir(fp) + parentDirInfo := dirList[parentDir] + + if fi.IsDir() { + entry.children = make([]*finalizeFileInfo, 0, 20) + dirList[fp] = entry + if !isRoot { + parentDirInfo.children = append(parentDirInfo.children, entry) + dirList[parentDir] = parentDirInfo + } + } else { + // calculate blocks + size := fi.Size() + blocks := uint32(size / fs.blocksize) + // add one for partial? + if size%fs.blocksize > 0 { + blocks++ + } + entry.extension = extension + entry.blocks = blocks + fileList = append(fileList, entry) + parentDirInfo.children = append(parentDirInfo.children, entry) + dirList[parentDir] = parentDirInfo + } + return nil + }) + + // we now have list of all of the files and directories and their properties, as well as children of every directory + // calculate the sizes of the directories + for _, dir := range dirList { + size := 0 + // add for self and parent + size += 34 + 34 + for _, e := range dir.children { + // calculate the size of the entry + namelen := len(e.shortname) + if !e.IsDir() { + // add 1 for the separator '.' 
and 2 for ';1' + namelen += 1 + len(e.extension) + 2 + } + if namelen%2 == 0 { + namelen++ + } + // add name size to the fixed record size - for now just 33 + recordSize := namelen + 33 + e.recordSize = uint8(recordSize) + // do not go over a block boundary; pad if necessary + newlength := size + recordSize + left := blocksize - size%blocksize + if left != 0 && newlength/blocksize > size/blocksize { + size += left + } + size += recordSize + } + // now we have the total size of the entrylist for this directory - calculate the blocks + blocks := uint32(size / blocksize) + // add one? + if size%blocksize > 0 { + blocks++ + } + dir.size = int64(size) + dir.blocks = blocks + } + + // we have the list of all files and directories, and the number of blocks required to store each + // now just sort and store them, beginning with root + dirs := make([]*finalizeFileInfo, 0, 20) + root := dirList["."] + dirs = append(dirs, root) + subdirs, files := root.collapseAndSortChildren(1) + dirs = append(dirs, subdirs...) + + // we now have sorted list of block order, with sizes and number of blocks on each + // next assign the blocks to each, and then we can enter the data in the directory entries + totalSize := uint32(0) + // totalSize includes the system area + totalSize += 16 * uint32(blocksize) + location := uint32(18) + for _, e := range dirs { + e.location = location + location += e.blocks + } + + // create the pathtables (L & M) + // with the list of directories, we can make a path table + pathTable := createPathTable(dirs) + // how big is the path table? 
we will take LSB for now, because they are the same size + pathTableLBytes := pathTable.toLBytes() + pathTableMBytes := pathTable.toMBytes() + pathTableSize := len(pathTableLBytes) + pathTableBlocks := uint32(pathTableSize / blocksize) + if pathTableSize%blocksize > 0 { + pathTableBlocks++ + } + // we do not do optional path tables yet + pathTableLLocation := location + location += pathTableBlocks + pathTableMLocation := location + location += pathTableBlocks + + for _, e := range files { + e.location = location + location += e.blocks + } + + // now we can write each one out - dirs first then files + for _, e := range dirs { + writeAt := int64(e.location) * int64(blocksize) + // also need to add self and parent to it + self := e.toDirectoryEntry(fs, true, false) + parent := &directoryEntry{} + if e.parent == nil { + *parent = *self + parent.isSelf = false + parent.isParent = true + } else { + parent = e.parent.toDirectoryEntry(fs, false, true) + } + entries := []*directoryEntry{self, parent} + for _, child := range e.children { + entries = append(entries, child.toDirectoryEntry(fs, false, false)) + } + d := &Directory{ + directoryEntry: *self, + entries: entries, + } + // Directory.toBytes() always returns whole blocks + p, err := d.entriesToBytes() + totalSize += uint32(len(b)) + if err != nil { + return fmt.Errorf("Could not convert directory to bytes: %v", err) + } + f.WriteAt(p, writeAt) + } + + // now write out the path tables, L & M + writeAt := int64(pathTableLLocation) * int64(blocksize) + f.WriteAt(pathTableLBytes, writeAt) + writeAt = int64(pathTableMLocation) * int64(blocksize) + f.WriteAt(pathTableMBytes, writeAt) + + for _, e := range files { + writeAt := int64(e.location) * int64(blocksize) + // for file, just copy the data across + from, err := os.Open(e.path) + if err != nil { + return fmt.Errorf("failed to open file for reading %s: %v", e.path, err) + } + defer from.Close() + copied, err := copyFileData(from, f, 0, writeAt) + if err != nil { + 
return fmt.Errorf("failed to copy file to disk %s: %v", e.path, err) + } + if copied != int(e.Size()) { + return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, e.Size()) + } + totalSize += e.blocks * uint32(blocksize) + } + + // create and write the primary volume descriptor and the volume descriptor set terminator + now := time.Now() + pvd := &primaryVolumeDescriptor{ + systemIdentifier: "", + volumeIdentifier: "ISOIMAGE", + volumeSize: uint32(totalSize), + setSize: 1, + sequenceNumber: 1, + blocksize: uint16(fs.blocksize), + pathTableSize: uint32(pathTableSize), + pathTableLLocation: pathTableLLocation, + pathTableLOptionalLocation: 0, + pathTableMLocation: pathTableMLocation, + pathTableMOptionalLocation: 0, + volumeSetIdentifier: "", + publisherIdentifier: "", + preparerIdentifier: util.AppNameVersion, + applicationIdentifier: "", + copyrightFile: "", // 37 bytes + abstractFile: "", // 37 bytes + bibliographicFile: "", // 37 bytes + creation: now, + modification: now, + expiration: now, + effective: now, + rootDirectoryEntry: root.toDirectoryEntry(fs, true, false), + } + b = pvd.toBytes() + f.WriteAt(b, 16*int64(blocksize)) + terminator := &terminatorVolumeDescriptor{} + b = terminator.toBytes() + f.WriteAt(b, 17*int64(blocksize)) + + // reset the workspace + os.Chdir(cwd) + + // finish by setting as finalized + fs.workspace = "" + return nil +} + +func copyFileData(from, to util.File, fromOffset, toOffset int64) (int, error) { + buf := make([]byte, 2048) + copied := 0 + for { + n, err := from.ReadAt(buf, fromOffset+int64(copied)) + if err != nil && err != io.EOF { + return copied, err + } + if n == 0 { + break + } + + if _, err := to.WriteAt(buf[:n], toOffset+int64(copied)); err != nil { + return copied, err + } + copied += n + } + return copied, nil +} + +// sort path table entries +func sortFinalizeFileInfoPathTable(left, right *finalizeFileInfo) bool { + switch { + case left.parent == right.parent: + // same 
parents = same depth, just sort on name + lname := left.Name() + rname := right.Name() + maxLen := maxInt(len(lname), len(rname)) + format := fmt.Sprintf("%%-%ds", maxLen) + return fmt.Sprintf(format, lname) < fmt.Sprintf(format, rname) + case left.depth < right.depth: + // different parents with different depth, lower first + return true + case right.depth > left.depth: + return false + case left.parent == nil && right.parent != nil: + return true + case left.parent != nil && right.parent == nil: + return false + default: + // same depth, different parents, it depends on the sort order of the parents + return sortFinalizeFileInfoPathTable(left.parent, right.parent) + } +} + +// create a path table from a slice of *finalizeFileInfo that are directories +func createPathTable(fi []*finalizeFileInfo) *pathTable { + // copy so we do not modify the original + fs := make([]*finalizeFileInfo, len(fi)) + copy(fs, fi) + // sort via the rules + sort.Slice(fs, func(i, j int) bool { + return sortFinalizeFileInfoPathTable(fs[i], fs[j]) + }) + indexMap := make(map[*finalizeFileInfo]int) + // now that it is sorted, create the ordered path table entries + entries := make([]*pathTableEntry, 0) + for i, e := range fs { + name := e.Name() + nameSize := len(name) + size := 8 + uint16(nameSize) + if nameSize%2 != 0 { + size++ + } + ownIndex := i + 1 + indexMap[e] = ownIndex + // root just points to itself + parentIndex := ownIndex + if ip, ok := indexMap[e.parent]; ok { + parentIndex = ip + } + pte := &pathTableEntry{ + nameSize: uint8(nameSize), + size: size, + extAttrLength: 0, + location: e.location, + parentIndex: uint16(parentIndex), + dirname: name, + } + entries = append(entries, pte) + } + return &pathTable{ + records: entries, + } + +} diff --git a/filesystem/iso9660/finalize_internal_test.go b/filesystem/iso9660/finalize_internal_test.go new file mode 100644 index 00000000..953892af --- /dev/null +++ b/filesystem/iso9660/finalize_internal_test.go @@ -0,0 +1,201 @@ +package 
iso9660

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"testing"
)

// TestCopyFileData verifies that copyFileData moves all bytes intact
// from a source file to a target file.
func TestCopyFileData(t *testing.T) {
	// create an empty file as source
	from, err := ioutil.TempFile("", "iso9660_finalize_test_from")
	if err != nil {
		t.Fatal("error creating 'from' tmpfile", err)
	}

	defer os.Remove(from.Name()) // clean up

	// create some random data
	// 100KB is fine
	blen := 1024 * 100
	b := make([]byte, blen)
	_, err = rand.Read(b)
	if err != nil {
		t.Fatal("error getting random bytes:", err)
	}

	if _, err = from.Write(b); err != nil {
		t.Fatal("Error writing random bytes to 'from' tmpfile", err)
	}

	// create a target file
	to, err := ioutil.TempFile("", "iso9660_finalize_test_to")
	if err != nil {
		t.Fatal("error creating 'to' tmpfile", err)
	}
	// clean up the target file; the original deferred a second remove of
	// from.Name() here, leaking the 'to' tempfile
	defer os.Remove(to.Name())

	copied, err := copyFileData(from, to, 0, 0)
	if err != nil {
		t.Fatal("error copying data from/to", err)
	}
	expected := blen
	if copied != expected {
		t.Fatalf("copied %d bytes instead of expected %d", copied, expected)
	}

	// io.SeekStart replaces the deprecated os.SEEK_SET
	_, err = to.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal("Error resetting 'to' file", err)
	}
	c := make([]byte, blen)
	if _, err = to.Read(c); err != nil {
		t.Fatal("Error reading 'to' tmpfile", err)
	}

	if !bytes.Equal(b, c) {
		t.Fatalf("Mismatched content between 'from' and 'to' files, 'from' then 'to'\n%#x\n%#x", b, c)
	}

	if err := from.Close(); err != nil {
		t.Fatal("error closing 'from' tmpfile", err)
	}
	if err := to.Close(); err != nil {
		t.Fatal("error closing 'to' tmpfile", err)
	}
}

// TestSortFinalizeFileInfoPathTable checks the pairwise ordering rules used
// to sort entries into path table order.
func TestSortFinalizeFileInfoPathTable(t *testing.T) {
	tests := []struct {
		left  *finalizeFileInfo
		right *finalizeFileInfo
		less  bool
	}{
		{&finalizeFileInfo{parent: nil, depth: 3, name: "ABC", shortname: "ABC"}, &finalizeFileInfo{parent: nil, depth: 3, name: "XYZ", shortname: "DEF"}, true},                                                                                                                                          // same parent, should sort by name
		{&finalizeFileInfo{parent: nil, depth: 3, name: "XYZ", shortname: "XYZ"}, &finalizeFileInfo{parent: nil, depth: 3, name: "ABC", shortname: "ABC"}, false},                                                                                                                                         // same parent, should sort by name
		{&finalizeFileInfo{parent: &finalizeFileInfo{}, depth: 3, name: "ABC", shortname: "ABC"}, &finalizeFileInfo{parent: &finalizeFileInfo{}, depth: 4, name: "ABC", shortname: "ABC"}, true},                                                                                                          // different parents, should sort by depth
		{&finalizeFileInfo{parent: &finalizeFileInfo{}, depth: 4, name: "ABC", shortname: "ABC"}, &finalizeFileInfo{parent: &finalizeFileInfo{}, depth: 3, name: "ABC", shortname: "ABC"}, false},                                                                                                         // different parents, should sort by depth
		{&finalizeFileInfo{parent: &finalizeFileInfo{parent: nil, name: "AAA", shortname: "AAA"}, depth: 3, name: "ABC", shortname: "ABC"}, &finalizeFileInfo{parent: &finalizeFileInfo{parent: nil, name: "ZZZ", shortname: "ZZZ"}, depth: 3, name: "ABC", shortname: "ABC"}, true},                       // different parents, same depth, should sort by parent
		{&finalizeFileInfo{parent: &finalizeFileInfo{parent: nil, name: "ZZZ", shortname: "ZZZ"}, depth: 3, name: "ABC", shortname: "ABC"}, &finalizeFileInfo{parent: &finalizeFileInfo{parent: nil, name: "AAA", shortname: "AAA"}, depth: 3, name: "ABC", shortname: "ABC"}, false},                      // different parents, same depth, should sort by parent
	}
	for i, tt := range tests {
		result := sortFinalizeFileInfoPathTable(tt.left, tt.right)
		if result != tt.less {
			t.Errorf("%d: got %v expected %v", i, result, tt.less)
		}
	}
}

// TestCreatePathTable verifies that createPathTable sorts directories and
// assigns indexes and parent indexes correctly.
func TestCreatePathTable(t *testing.T) {
	// uses name, parent, location
	root := &finalizeFileInfo{name: "", location: 16, isDir: true}
	root.parent = root
	fives := &finalizeFileInfo{name: "FIVES", shortname: "FIVES", location: 22, parent: root, isDir: true}
	tens := &finalizeFileInfo{name: "TENLETTERS", shortname: "TENLETTERS", location: 17, parent: root, isDir: true}
	subFives := &finalizeFileInfo{name: "SUBOFFIVES12", shortname: "SUBOFFIVES12", location: 45, parent: fives, isDir: true}
	subTen := &finalizeFileInfo{name: "SHORT", shortname: "SHORT", location: 32, parent: tens, isDir: true}
	input := []*finalizeFileInfo{subTen, fives, root, tens, subFives}
	expected := &pathTable{
		records: []*pathTableEntry{
			{nameSize: 0, size: 8, extAttrLength: 0, location: 16, parentIndex: 1, dirname: ""},
			{nameSize: 5, size: 14, extAttrLength: 0, location: 22, parentIndex: 1, dirname: "FIVES"},
			{nameSize: 10, size: 18, extAttrLength: 0, location: 17, parentIndex: 1, dirname: "TENLETTERS"},
			{nameSize: 12, size: 20, extAttrLength: 0, location: 45, parentIndex: 2, dirname: "SUBOFFIVES12"},
			{nameSize: 5, size: 14, extAttrLength: 0, location: 32, parentIndex: 3, dirname: "SHORT"},
		},
	}
	pt := createPathTable(input)
	if !pt.equal(expected) {
		t.Errorf("pathTable not as expected, actual then expected\n%#v\n%#v", pt.names(), expected.names())
	}
}

// TestCollapseAndSortChildren builds a small tree and checks that
// collapseAndSortChildren returns dirs depth-first and files in matching
// order, each sorted by name within a directory.
func TestCollapseAndSortChildren(t *testing.T) {
	// we need to build a file tree, and then see that the results are correct and in order
	// the algorithm uses the following properties of finalizeFileInfo:
	//   isDir, children, name, shortname
	// the algorithm is supposed to sort by name in each node, and depth first
	root := &finalizeFileInfo{name: ".", depth: 1, isDir: true}
	children := []*finalizeFileInfo{
		{name: "ABC", shortname: "ABC", isDir: false},
		{name: "DEF", shortname: "DEF", isDir: true},
		{name: "TWODEEP", shortname: "TWODEEP", isDir: true, children: []*finalizeFileInfo{
			{name: "TWODEEP1", shortname: "TWODEEP1", isDir: false},
			{name: "TWODEEP3", shortname: "TWODEEP3", isDir: true, children: []*finalizeFileInfo{
				{name: "TWODEEP33", shortname: "TWODEEP33", isDir: false},
				{name: "TWODEEP31", shortname: "TWODEEP31", isDir: false},
				{name: "TWODEEP32", shortname: "TWODEEP32", isDir: true},
			}},
			{name: "TWODEEP2", shortname: "TWODEEP2", isDir: false},
		}},
		{name: "README.MD", shortname: "README.MD", isDir: false},
		{name: "ONEDEEP", shortname: "ONEDEEP", isDir: true, children: []*finalizeFileInfo{
			{name: "ONEDEEP1", shortname: "ONEDEEP1", isDir: false},
			{name: "ONEDEEP3", shortname: "ONEDEEP3", isDir: false},
			{name: "ONEDEEP2", shortname: "ONEDEEP2", isDir: false},
		}},
	}
	expectedDirs := []*finalizeFileInfo{
		children[1], children[4], children[2], children[2].children[1], children[2].children[1].children[2],
	}
	expectedFiles := []*finalizeFileInfo{
		children[0], children[3], children[4].children[0], children[4].children[2], children[4].children[1],
		children[2].children[0], children[2].children[2], children[2].children[1].children[1], children[2].children[1].children[0],
	}
	root.children = children
	dirs, files := root.collapseAndSortChildren(1)
	dirsMatch := true
	if len(dirs) != len(expectedDirs) {
		dirsMatch = false
	}
	filesMatch := true
	if len(files) != len(expectedFiles) {
		filesMatch = false
	}
	if dirsMatch {
		for i, d := range dirs {
			if d != expectedDirs[i] {
				dirsMatch = false
				break
			}
		}
	}
	if filesMatch {
		for i, f := range files {
			if f != expectedFiles[i] {
				filesMatch = false
				break
			}
		}
	}
	if !dirsMatch {
		t.Error("mismatched dirs, actual then expected")
		output := ""
		for _, e := range dirs {
			output = fmt.Sprintf("%s{%s,%v},", output, e.name, e.isDir)
		}
		t.Log(output)
		output = ""
		for _, e := range expectedDirs {
			output = fmt.Sprintf("%s{%s,%v},", output, e.name, e.isDir)
		}
		t.Log(output)
	}
	if !filesMatch {
		t.Error("mismatched files, actual then expected")
		output := ""
		for _, e := range files {
			output = fmt.Sprintf("%s{%s,%v},", output, e.name, e.isDir)
		}
		t.Log(output)
		output = ""
		for _, e := range expectedFiles {
			output = fmt.Sprintf("%s{%s,%v},", output, e.name, e.isDir)
		}
		t.Log(output)
	}
}
diff --git a/filesystem/iso9660/finalize_test.go b/filesystem/iso9660/finalize_test.go new file mode 100644 index 00000000..53524c15 --- /dev/null +++ b/filesystem/iso9660/finalize_test.go @@ -0,0 +1,140
@@ +package iso9660_test + +import ( + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/deitch/diskfs/filesystem" + "github.com/deitch/diskfs/filesystem/iso9660" +) + +// full test - create some files, finalize, check the output +func TestFinalize(t *testing.T) { + blocksize := int64(2048) + f, err := ioutil.TempFile("", "iso_finalize_test") + defer os.Remove(f.Name()) + if err != nil { + t.Fatalf("Failed to create tmpfile: %v", err) + } + fs, err := iso9660.Create(f, 0, 0, blocksize) + if err != nil { + t.Fatalf("Failed to iso9660.Create: %v", err) + } + for _, dir := range []string{"/", "/FOO", "/BAR", "/ABC"} { + err = fs.Mkdir(dir) + if err != nil { + t.Fatalf("Failed to iso9660.Mkdir(%s): %v", dir, err) + } + } + var isofile filesystem.File + for _, filename := range []string{"/BAR/LARGEFILE", "/ABC/LARGEFILE", "/README.MD"} { + isofile, err = fs.OpenFile(filename, os.O_CREATE|os.O_RDWR) + if err != nil { + t.Fatalf("Failed to iso9660.OpenFile(%s): %v", filename, err) + } + // create some random data + blen := 1024 * 1024 + for i := 0; i < 5; i++ { + b := make([]byte, blen) + _, err = rand.Read(b) + if err != nil { + t.Fatalf("%d: error getting random bytes for file %s: %v", i, filename, err) + } + if _, err = isofile.Write(b); err != nil { + t.Fatalf("%d: error writing random bytes to tmpfile %s: %v", i, filename, err) + } + } + } + + fooCount := 75 + for i := 0; i <= fooCount; i++ { + filename := fmt.Sprintf("/FOO/FILENAME_%d", i) + contents := []byte(fmt.Sprintf("filename_%d\n", i)) + isofile, err = fs.OpenFile(filename, os.O_CREATE|os.O_RDWR) + if err != nil { + t.Fatalf("Failed to iso9660.OpenFile(%s): %v", filename, err) + } + if _, err = isofile.Write(contents); err != nil { + t.Fatalf("%d: error writing bytes to tmpfile %s: %v", i, filename, err) + } + } + + err = fs.Finalize() + if err != nil { + t.Fatal("Unexpected error fs.Finalize()", err) + } + // now need to check contents + fi, err := f.Stat() + if err != nil { + 
t.Fatalf("Error trying to Stat() iso file: %v", err) + } + // we made two 5MB files, so should be at least 10MB + if fi.Size() < 10*1024*1024 { + t.Fatalf("Resultant file too small after finalizing %d", fi.Size()) + } + + // now check the contents + fs, err = iso9660.Read(f, 0, 0, 2048) + if err != nil { + t.Fatalf("error reading the tmpfile as iso: %v", err) + } + + dirFi, err := fs.ReadDir("/") + if err != nil { + t.Errorf("error reading the root directory from iso: %v", err) + } + // we expect to have 3 entries: ABC BAR and FOO + expected := map[string]bool{ + "ABC": false, "BAR": false, "FOO": false, "README.MD": false, + } + for _, e := range dirFi { + delete(expected, e.Name()) + } + if len(expected) > 0 { + keys := make([]string, 0) + for k := range expected { + keys = append(keys, k) + } + t.Errorf("Some entries not found in root: %v", keys) + } + + // get a few files I expect + fileContents := map[string]string{ + "/FOO/FILENAME_50": "filename_50\n", + "/FOO/FILENAME_2": "filename_2\n", + } + + for k, v := range fileContents { + var ( + f filesystem.File + read int + ) + + f, err = fs.OpenFile(k, os.O_RDONLY) + if err != nil { + t.Errorf("Error opening file %s: %v", k, err) + continue + } + // check the contents + b := make([]byte, 50, 50) + read, err = f.Read(b) + if err != nil && err != io.EOF { + t.Errorf("Error reading from file %s: %v", k, err) + } + actual := string(b[:read]) + if actual != v { + t.Errorf("Mismatched content, actual '%s' expected '%s'", actual, v) + } + } + + // close the file + err = f.Close() + if err != nil { + t.Fatalf("Could not close iso file: %v", err) + } + +} diff --git a/filesystem/iso9660/iso9660.go b/filesystem/iso9660/iso9660.go new file mode 100644 index 00000000..e3b0702e --- /dev/null +++ b/filesystem/iso9660/iso9660.go @@ -0,0 +1,405 @@ +package iso9660 + +import ( + "encoding/binary" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/deitch/diskfs/filesystem" + "github.com/deitch/diskfs/util" +) + 
+const ( + volumeDescriptorSize int64 = 2 * KB // each volume descriptor is 2KB + systemAreaSize int64 = 32 * KB // 32KB system area size + defaultSectorSize int64 = 2 * KB + // MaxBlocks maximum number of blocks allowed in an iso9660 filesystem + MaxBlocks int64 = 4.294967296e+09 // 2^32 +) + +// FileSystem implements the FileSystem interface +type FileSystem struct { + workspace string + size int64 + start int64 + file util.File + blocksize int64 + volumes volumeDescriptors + pathTable *pathTable + rootDir *directoryEntry +} + +// Equal compare if two filesystems are equal +func (fs *FileSystem) Equal(a *FileSystem) bool { + localMatch := fs.file == a.file && fs.size == a.size + vdMatch := fs.volumes.equal(&a.volumes) + return localMatch && vdMatch +} + +// Workspace get the workspace path +func (fs *FileSystem) Workspace() string { + return fs.workspace +} + +// Create creates an ISO9660 filesystem in a given directory +// +// requires the util.File where to create the filesystem, size is the size of the filesystem in bytes, +// start is how far in bytes from the beginning of the util.File to create the filesystem, +// and blocksize is is the logical blocksize to use for creating the filesystem +// +// note that you are *not* required to create the filesystem on the entire disk. You could have a disk of size +// 20GB, and create a small filesystem of size 50MB that begins 2GB into the disk. +// This is extremely useful for creating filesystems on disk partitions. +// +// Note, however, that it is much easier to do this using the higher-level APIs at github.com/deitch/diskfs +// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) +// where a partition starts and ends. +// +// If the provided blocksize is 0, it will use the default of 2 KB. 
+func Create(f util.File, size int64, start int64, blocksize int64) (*FileSystem, error) { + if blocksize == 0 { + blocksize = defaultSectorSize + } + // make sure it is an allowed blocksize + if err := validateBlocksize(blocksize); err != nil { + return nil, err + } + // size of 0 means to use defaults + if size != 0 && size > MaxBlocks*blocksize { + return nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size of %d blocks", MaxBlocks) + } + // at bare minimum, it must have enough space for the system area, one volume descriptor, one volume decriptor set terminator, and one block of data + if size != 0 && size < systemAreaSize+2*volumeDescriptorSize+blocksize { + return nil, fmt.Errorf("requested size is smaller than minimum allowed ISO9660 size: system area (%d), one volume descriptor (%d), one volume descriptor set terminator (%d), and one block (%d)", systemAreaSize, volumeDescriptorSize, volumeDescriptorSize, blocksize) + } + + // create a temporary working area where we can create the filesystem. + // It is only on `Finalize()` that we write it out to the actual disk file + tmpdir, err := ioutil.TempDir("", "diskfs_iso") + if err != nil { + return nil, fmt.Errorf("Could not create working directory: %v", err) + } + + // create root directory + // there is nothing in there + return &FileSystem{ + workspace: tmpdir, + start: start, + size: size, + file: f, + volumes: volumeDescriptors{}, + blocksize: blocksize, + }, nil +} + +// Read reads a filesystem from a given disk. +// +// requires the util.File where to read the filesystem, size is the size of the filesystem in bytes, +// start is how far in bytes from the beginning of the util.File the filesystem is expected to begin, +// and blocksize is is the logical blocksize to use for creating the filesystem +// +// note that you are *not* required to read a filesystem on the entire disk. 
You could have a disk of size +// 20GB, and a small filesystem of size 50MB that begins 2GB into the disk. +// This is extremely useful for working with filesystems on disk partitions. +// +// Note, however, that it is much easier to do this using the higher-level APIs at github.com/deitch/diskfs +// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) +// where a partition starts and ends. +// +// If the provided blocksize is 0, it will use the default of 2K bytes +func Read(file util.File, size int64, start int64, blocksize int64) (*FileSystem, error) { + if blocksize == 0 { + blocksize = defaultSectorSize + } + // make sure it is an allowed blocksize + if err := validateBlocksize(blocksize); err != nil { + return nil, err + } + // default size of 0 means use whatever size is available + if size != 0 && size > MaxBlocks*blocksize { + return nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size of %d blocks", MaxBlocks) + } + // at bare minimum, it must have enough space for the system area, one volume descriptor, one volume decriptor set terminator, and one block of data + if size != 0 && size < systemAreaSize+2*volumeDescriptorSize+blocksize { + return nil, fmt.Errorf("requested size is too small to allow for system area (%d), one volume descriptor (%d), one volume descriptor set terminator (%d), and one block (%d)", systemAreaSize, volumeDescriptorSize, volumeDescriptorSize, blocksize) + } + + // load the information from the disk + // read system area + systemArea := make([]byte, systemAreaSize, systemAreaSize) + n, err := file.ReadAt(systemArea, start) + if err != nil { + return nil, fmt.Errorf("Could not read bytes from file: %v", err) + } + if uint16(n) < uint16(systemAreaSize) { + return nil, fmt.Errorf("Only could read %d bytes from file", n) + } + // we do not do anything with the system area for now + + // next read the volume descriptors, one at a time, until we hit 
the terminator + vds := make([]volumeDescriptor, 2) + terminated := false + var pvd *primaryVolumeDescriptor + for i := 0; !terminated; i++ { + vdBytes := make([]byte, volumeDescriptorSize, volumeDescriptorSize) + // read vdBytes + read, err := file.ReadAt(vdBytes, start+systemAreaSize+int64(i)*volumeDescriptorSize) + if err != nil { + return nil, fmt.Errorf("Unable to read bytes for volume descriptor %d: %v", i, err) + } + if int64(read) != volumeDescriptorSize { + return nil, fmt.Errorf("Read %d bytes instead of expected %d for volume descriptor %d", read, volumeDescriptorSize, i) + } + // convert to a vd structure + vd, err := volumeDescriptorFromBytes(vdBytes) + if err != nil { + return nil, fmt.Errorf("Error reading Volume Descriptor: %v", err) + } + // is this a terminator? + switch vd.Type() { + case volumeDescriptorTerminator: + terminated = true + case volumeDescriptorPrimary: + vds = append(vds, vd) + pvd = vd.(*primaryVolumeDescriptor) + default: + vds = append(vds, vd) + } + } + + // load up our path table and root directory entry + var pt *pathTable + var rootDirEntry *directoryEntry + if pvd != nil { + rootDirEntry = pvd.rootDirectoryEntry + pathTableBytes := make([]byte, pvd.pathTableSize, pvd.pathTableSize) + pathTableLocation := pvd.pathTableLLocation * uint32(pvd.blocksize) + read, err := file.ReadAt(pathTableBytes, int64(pathTableLocation)) + if err != nil { + return nil, fmt.Errorf("Unable to read path table of size %d at location %d: %v", pvd.pathTableSize, pathTableLocation, err) + } + if read != len(pathTableBytes) { + return nil, fmt.Errorf("Read %d bytes of path table instead of expected %d at location %d", read, pvd.pathTableSize, pathTableLocation) + } + pt, err = parsePathTable(pathTableBytes) + if err != nil { + return nil, fmt.Errorf("Unable to parse path table of size %d at location %d: %v", pvd.pathTableSize, pathTableLocation, err) + } + } + + fs := &FileSystem{ + workspace: "", // no workspace when we do nothing with it + start: 
start, + size: size, + file: file, + volumes: volumeDescriptors{ + descriptors: vds, + primary: pvd, + }, + blocksize: blocksize, + pathTable: pt, + rootDir: rootDirEntry, + } + rootDirEntry.filesystem = fs + return fs, nil +} + +// Type returns the type code for the filesystem. Always returns filesystem.TypeFat32 +func (fs *FileSystem) Type() filesystem.Type { + return filesystem.TypeISO9660 +} + +// Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that: +// +// * It will make the entire tree path if it does not exist +// * It will not return an error if the path already exists +// +// if readonly and not in workspace, will return an error +func (fs *FileSystem) Mkdir(p string) error { + if fs.workspace == "" { + return fmt.Errorf("Cannot write to read-only filesystem") + } + err := os.MkdirAll(path.Join(fs.workspace, p), 0755) + if err != nil { + return fmt.Errorf("Could not create directory %s: %v", p, err) + } + // we are not interesting in returning the entries + return err +} + +// ReadDir return the contents of a given directory in a given filesystem. +// +// Returns a slice of os.FileInfo with all of the entries in the directory. 
+// +// Will return an error if the directory does not exist or is a regular file and not a directory +func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { + var fi []os.FileInfo + var err error + // non-workspace: read from iso9660 + // workspace: read from regular filesystem + if fs.workspace != "" { + fullPath := path.Join(fs.workspace, p) + // read the entries + fi, err = ioutil.ReadDir(fullPath) + if err != nil { + return nil, fmt.Errorf("Could not read directory %s: %v", p, err) + } + } else { + dirEntries, err := fs.readDirectory(p) + if err != nil { + return nil, fmt.Errorf("Error reading directory %s: %v", p, err) + } + fi = make([]os.FileInfo, 0, len(dirEntries)) + for _, entry := range dirEntries { + // ignore any entry that is current directory or parent + if entry.isSelf || entry.isParent { + continue + } + fi = append(fi, entry) + } + } + return fi, nil +} + +// OpenFile returns an io.ReadWriter from which you can read the contents of a file +// or write contents to the file +// +// accepts normal os.OpenFile flags +// +// returns an error if the file does not exist +func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { + var f filesystem.File + var err error + + // get the path and filename + dir := path.Dir(p) + filename := path.Base(p) + // a single '.' 
separator normally is mandated, so enter it if non-existent + filenameExt := filename + if !strings.Contains(filenameExt, ".") { + filenameExt = fmt.Sprintf("%s.", filenameExt) + } + filenameExt = fmt.Sprintf("%s;1", filenameExt) + + // if the dir == filename, then it is just / + if dir == filename { + return nil, fmt.Errorf("Cannot open directory %s as file", p) + } + + // cannot open to write or append or create if we do not have a workspace + writeMode := flag&os.O_WRONLY != 0 || flag&os.O_RDWR != 0 || flag&os.O_APPEND != 0 || flag&os.O_CREATE != 0 || flag&os.O_TRUNC != 0 || flag&os.O_EXCL != 0 + if fs.workspace == "" { + if writeMode { + return nil, fmt.Errorf("Cannot write to read-only filesystem") + } + + // get the directory entries + var entries []*directoryEntry + entries, err = fs.readDirectory(dir) + if err != nil { + return nil, fmt.Errorf("Could not read directory entries for %s", dir) + } + // we now know that the directory exists, see if the file exists + var targetEntry *directoryEntry + for _, e := range entries { + eName := e.filename + // cannot do anything with directories + if eName == filename && e.IsDir() { + return nil, fmt.Errorf("Cannot open directory %s as file", p) + } + if eName == filenameExt { + // if we got this far, we have found the file + targetEntry = e + break + } + } + + // see if the file exists + // if the file does not exist, and is not opened for os.O_CREATE, return an error + if targetEntry == nil { + return nil, fmt.Errorf("Target file %s does not exist", p) + } + // now open the file + f = &File{ + directoryEntry: targetEntry, + isReadWrite: false, + isAppend: false, + offset: 0, + } + } else { + f, err = os.OpenFile(path.Join(fs.workspace, p), flag, 0644) + if err != nil { + return nil, fmt.Errorf("Target file %s does not exist: %v", p, err) + } + } + + return f, nil +} + +// readDirectory - read directory entry on iso only (not workspace) +func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { + var 
( + location, size uint32 + err error + n int + ) + // try from path table, then walk the directory tree + location, err = fs.pathTable.getLocation(p) + if err != nil { + return nil, fmt.Errorf("Unable to read path %s from path table: %v", p, err) + } + // if we found it, read the first directory entry to get the size + if location != 0 { + // we need 4 bytes to read the size of the directory; it is at offset 10 from beginning + dirb := make([]byte, 4) + n, err = fs.file.ReadAt(dirb, int64(location)*fs.blocksize+10) + if err != nil { + return nil, fmt.Errorf("Could not read directory %s: %v", p, err) + } + if n != len(dirb) { + return nil, fmt.Errorf("Read %d bytes instead of expected %d", n, len(dirb)) + } + // convert to uint32 + size = binary.LittleEndian.Uint32(dirb) + } else { + // if we could not find the location in the path table, try reading directly from the disk + // it is slow, but this is how Unix does it, since many iso creators *do* create illegitimate disks + location, size, err = fs.rootDir.getLocation(p) + if err != nil { + return nil, fmt.Errorf("Unable to read directory tree for %s: %v", p, err) + } + } + + // did we still not find it? 
+ if location == 0 { + return nil, fmt.Errorf("Could not find directory %s", p) + } + + // we have a location, let's read the directories from it + b := make([]byte, size, size) + n, err = fs.file.ReadAt(b, int64(location)*fs.blocksize) + if err != nil { + return nil, fmt.Errorf("Could not read directory entries for %s: %v", p, err) + } + if n != int(size) { + return nil, fmt.Errorf("Reading directory %s returned %d bytes read instead of expected %d", p, n, size) + } + // parse the entries + entries, err := parseDirEntries(b, fs) + if err != nil { + return nil, fmt.Errorf("Could not parse directory entries for %s: %v", p, err) + } + return entries, nil +} + +func validateBlocksize(blocksize int64) error { + switch blocksize { + case 0, 512, 1024, 2048, 4096, 8192: + return nil + default: + return fmt.Errorf("blocksize for ISO9660 must be one of 500, 512, 1024, 2048, 4096, 8192") + } +} diff --git a/filesystem/iso9660/iso9660_internal_test.go b/filesystem/iso9660/iso9660_internal_test.go new file mode 100644 index 00000000..38b9eb6a --- /dev/null +++ b/filesystem/iso9660/iso9660_internal_test.go @@ -0,0 +1,66 @@ +package iso9660 + +import ( + "os" + "testing" +) + +func TestIso9660ReadDirectory(t *testing.T) { + // will use the file.iso fixture to test an actual directory + // \ (root directory) should be in one block + // \FOO should be in multiple blocks + file, err := os.Open(ISO9660File) + defer file.Close() + if err != nil { + t.Fatalf("Could not open file %s to read: %v", ISO9660File, err) + } + // FileSystem implements the FileSystem interface + pathTable, _, _, err := getValidPathTable() + if err != nil { + t.Fatalf("Could not get path table: %v", err) + } + fs := &FileSystem{ + workspace: "", // we only ever call readDirectory with no workspace + size: ISO9660Size, + start: 0, + file: file, + blocksize: 2048, + pathTable: pathTable, + } + validDe, _, _, _, err := getValidDirectoryEntries(fs) + if err != nil { + t.Fatalf("Unable to read valid directory 
entries: %v", err) + } + validDeExtended, _, _, err := getValidDirectoryEntriesExtended(fs) + if err != nil { + t.Fatalf("Unable to read valid directory entries extended: %v", err) + } + fs.rootDir = validDe[0] // validDe contains root directory entries, first one is the root itself + + tests := []struct { + path string + entries []*directoryEntry + }{ + {`\`, validDe}, + {"/", validDe}, + {`\FOO`, validDeExtended}, + {`/FOO`, validDeExtended}, + } + for _, tt := range tests { + entries, err := fs.readDirectory(tt.path) + switch { + case err != nil: + t.Errorf("fs.readDirectory(%s): unexpected nil error: %v", tt.path, err) + case len(entries) != len(tt.entries): + t.Errorf("fs.readDirectory(%s): number of entries do not match, actual %d expected %d", tt.path, len(entries), len(tt.entries)) + default: + for i, entry := range entries { + if !compareDirectoryEntriesIgnoreDates(entry, tt.entries[i]) { + t.Errorf("fs.readDirectory(%s) %d: entries do not match, actual then expected", tt.path, i) + t.Logf("%#v\n", entry) + t.Logf("%#v\n", tt.entries[i]) + } + } + } + } +} diff --git a/filesystem/iso9660/iso9660_test.go b/filesystem/iso9660/iso9660_test.go new file mode 100644 index 00000000..495f24f6 --- /dev/null +++ b/filesystem/iso9660/iso9660_test.go @@ -0,0 +1,457 @@ +package iso9660_test + +/* + These tests the exported functions + We want to do full-in tests with files +*/ + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "testing" + + "github.com/deitch/diskfs/filesystem" + "github.com/deitch/diskfs/filesystem/iso9660" +) + +func getOpenMode(mode int) string { + modes := make([]string, 0, 0) + if mode&os.O_CREATE == os.O_CREATE { + modes = append(modes, "CREATE") + } + if mode&os.O_APPEND == os.O_APPEND { + modes = append(modes, "APPEND") + } + if mode&os.O_RDWR == os.O_RDWR { + modes = append(modes, "RDWR") + } else { + modes = append(modes, "RDONLY") + } + return strings.Join(modes, "|") +} + +func getValidIso9660FSWorkspace() (*iso9660.FileSystem, 
error) { + // create the filesystem + f, err := tmpIso9660File() + if err != nil { + return nil, fmt.Errorf("Failed to create iso9660 tmpfile: %v", err) + } + return iso9660.Create(f, 0, 0, 2048) +} +func getValidIso9660FSReadOnly() (*iso9660.FileSystem, error) { + f, err := os.Open(iso9660.ISO9660File) + if err != nil { + return nil, fmt.Errorf("Failed to read iso9660 testfile %s: %v", iso9660.ISO9660File, err) + } + return iso9660.Read(f, 0, 0, 2048) +} + +func tmpIso9660File() (*os.File, error) { + filename := "iso9660_test.iso" + f, err := ioutil.TempFile("", filename) + if err != nil { + return nil, fmt.Errorf("Failed to create tempfile %s :%v", filename, err) + } + return f, nil +} + +func TestISO9660Type(t *testing.T) { + fs := &iso9660.FileSystem{} + fstype := fs.Type() + expected := filesystem.TypeISO9660 + if fstype != expected { + t.Errorf("Type() returns %v instead of expected %v", fstype, expected) + } +} + +func TestIso9660Mkdir(t *testing.T) { + t.Run("read-only", func(t *testing.T) { + fs, err := getValidIso9660FSReadOnly() + if err != nil { + t.Fatalf("Failed to get read-only ISO9660 filesystem: %v", err) + } + err = fs.Mkdir("/abcdef") + if err == nil { + t.Errorf("Received no error when trying to mkdir read-only filesystem") + } + }) + t.Run("workspace", func(t *testing.T) { + fs, err := getValidIso9660FSWorkspace() + if err != nil { + t.Errorf("Failed to get workspace: %v", err) + } + existPath := "/abc" + tests := []struct { + fs *iso9660.FileSystem + path string + err error + }{ + {fs, "/abcdef", nil}, // new one + {fs, existPath, nil}, // already exists + {fs, path.Join(existPath, "bar/def/la"), nil}, // already exists + {fs, "/a/b/c", nil}, // already exists + } + + // for fsw, we want to work at least once with a path that exists + existPathFull := path.Join(fs.Workspace(), existPath) + err = os.MkdirAll(existPathFull, 0755) + if err != nil { + t.Fatalf("Could not create path %s in workspace as %s: %v", existPath, existPathFull, err) + } + 
for _, tt := range tests { + fs := tt.fs + ws := fs.Workspace() + err := fs.Mkdir(tt.path) + if (err == nil && tt.err != nil) || (err != nil && err == nil) { + t.Errorf("Unexpected error mismatch. Actual: %v, expected: %v", err, tt.err) + } + // did the path exist? + if ws != "" { + fullPath := path.Join(ws, tt.path) + if _, err := os.Stat(fullPath); os.IsNotExist(err) { + t.Errorf("Path did not exist after creation base %s, in workspace %s", tt.path, fullPath) + } + } + } + }) +} + +func TestIso9660Create(t *testing.T) { + tests := []struct { + blocksize int64 + filesize int64 + fs *iso9660.FileSystem + err error + }{ + {500, 6000, nil, fmt.Errorf("blocksize for ISO9660 must be")}, + {513, 6000, nil, fmt.Errorf("blocksize for ISO9660 must be")}, + {2048, 2048*iso9660.MaxBlocks + 1, nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size")}, + {2048, 32*iso9660.KB + 3*2048 - 1, nil, fmt.Errorf("requested size is smaller than minimum allowed ISO9660 size")}, + {2048, 10000000, &iso9660.FileSystem{}, nil}, + } + for _, tt := range tests { + // create the filesystem + f, err := tmpIso9660File() + if err != nil { + t.Errorf("Failed to create iso9660 tmpfile: %v", err) + } + fs, err := iso9660.Create(f, tt.filesize, 0, tt.blocksize) + defer os.Remove(f.Name()) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): + t.Errorf("Create(%s, %d, %d, %d): mismatched errors, actual %v expected %v", f.Name(), tt.filesize, 0, tt.blocksize, err, tt.err) + case (fs == nil && tt.fs != nil) || (fs != nil && tt.fs == nil): + t.Errorf("Create(%s, %d, %d, %d): mismatched fs, actual then expected", f.Name(), tt.filesize, 0, tt.blocksize) + t.Logf("%v", fs) + t.Logf("%v", tt.fs) + } + // we do not match the filesystems here, only check functional accuracy + } +} + +func TestISO9660Read(t *testing.T) { + // test cases: + // - invalid blocksize + // - invalid 
file size (too small and too big) + // - valid file + tests := []struct { + blocksize int64 + filesize int64 + bytechange int64 + fs *iso9660.FileSystem + err error + }{ + {500, 6000, -1, nil, fmt.Errorf("blocksize for ISO9660 must be")}, + {513, 6000, -1, nil, fmt.Errorf("blocksize for ISO9660 must be")}, + {512, iso9660.MaxBlocks*2048 + 10000, -1, nil, fmt.Errorf("requested size is larger than maximum allowed ISO9660 size")}, + {512, 10000000, -1, &iso9660.FileSystem{}, nil}, + } + for _, tt := range tests { + // get a temporary working file + f, err := os.Open(iso9660.ISO9660File) + if err != nil { + t.Fatal(err) + } + defer f.Close() + // create the filesystem + fs, err := iso9660.Read(f, tt.filesize, 0, tt.blocksize) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): + t.Errorf("Read(%s, %d, %d, %d): mismatched errors, actual %v expected %v", f.Name(), tt.filesize, 0, tt.blocksize, err, tt.err) + case (fs == nil && tt.fs != nil) || (fs != nil && tt.fs == nil): + t.Errorf("Read(%s, %d, %d, %d): mismatched fs, actual then expected", f.Name(), tt.filesize, 0, tt.blocksize) + t.Logf("%v", fs) + t.Logf("%v", tt.fs) + } + // we do not match the filesystems here, only check functional accuracy + } +} + +func TestIso9660ReadDir(t *testing.T) { + type testList struct { + fs *iso9660.FileSystem + path string + count int + first string + last string + err error + } + runTests := func(t *testing.T, tests []testList) { + for _, tt := range tests { + fi, err := tt.fs.ReadDir(tt.path) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): + t.Errorf("fs.ReadDir(%s): mismatched errors, actual %v expected %v", tt.path, err, tt.err) + case len(fi) != tt.count: + t.Errorf("fs.ReadDir(%s): mismatched directory received %d entries, expected %d", tt.path, len(fi), tt.count) + case fi != nil && tt.count > 2 && fi[0].Name() != tt.first: + 
t.Errorf("fs.ReadDir(%s): mismatched first non-self or parent entry, actual then expected", tt.path) + t.Logf("%s", fi[0].Name()) + t.Logf("%s", tt.first) + case fi != nil && tt.count > 2 && fi[len(fi)-1].Name() != tt.last: + t.Errorf("fs.ReadDir(%s): mismatched last entry, actual then expected", tt.path) + t.Logf("%s", fi[len(fi)-1].Name()) + t.Logf("%s", tt.last) + } + } + } + t.Run("read-only", func(t *testing.T) { + fs, err := getValidIso9660FSReadOnly() + if err != nil { + t.Errorf("Failed to get read-only ISO9660 filesystem: %v", err) + } + runTests(t, []testList{ + {fs, "/abcdef", 0, "", "", fmt.Errorf("directory does not exist")}, // does not exist + // root should have 4 entries (since we do not pass back . and ..): + // . + // .. + // /ABC + // /BAR + // /FOO + // /README.MD;1 + {fs, "/", 4, "ABC", "README.MD", nil}, // exists + }, + ) + }) + t.Run("workspace", func(t *testing.T) { + fs, err := getValidIso9660FSWorkspace() + if err != nil { + t.Errorf("Failed to get workspace: %v", err) + } + // make sure existPath exists in the workspace + ws := fs.Workspace() + existPath := "/abc" + existPathWs := path.Join(ws, existPath) + os.MkdirAll(existPathWs, 0755) + // create files + for i := 0; i < 10; i++ { + filename := path.Join(existPathWs, fmt.Sprintf("filename_%d", i)) + contents := fmt.Sprintf("abcdefg %d", i) + ioutil.WriteFile(filename, []byte(contents), 0644) + } + // get the known []FileInfo + fi, err := ioutil.ReadDir(existPathWs) + if err != nil { + t.Errorf("Failed to read directory %s in workspace as %s: %v", existPath, existPathWs, err) + } + // convert to []*os.FileInfo to be useful + fis := make([]*os.FileInfo, 0, len(fi)) + for _, e := range fi { + fis = append(fis, &e) + } + runTests(t, []testList{ + {fs, "/abcdef", 0, "", "", fmt.Errorf("directory does not exist")}, // does not exist + {fs, existPath, 10, "filename_0", "filename_9", nil}, // exists + }, + ) + + }) +} + +func TestIso9660OpenFile(t *testing.T) { + // opening directories and 
files for reading + type testStruct struct { + path string + mode int + expected string + err error + } + + t.Run("Read", func(t *testing.T) { + runTests := func(t *testing.T, fs *iso9660.FileSystem, tests []testStruct) { + for _, tt := range tests { + header := fmt.Sprintf("OpenFile(%s, %s)", tt.path, getOpenMode(tt.mode)) + reader, err := fs.OpenFile(tt.path, tt.mode) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): + t.Errorf("%s: mismatched errors, actual: %v , expected: %v", header, err, tt.err) + case reader == nil && (tt.err == nil || tt.expected != ""): + t.Errorf("%s: Unexpected nil output", header) + case reader != nil: + b, err := ioutil.ReadAll(reader) + if err != nil { + t.Errorf("%s: ioutil.ReadAll(reader) unexpected error: %v", header, err) + } + if string(b) != tt.expected { + t.Errorf("%s: mismatched contents, actual then expected", header) + t.Log(string(b)) + t.Log(tt.expected) + } + } + } + } + t.Run("read-only", func(t *testing.T) { + fs, err := getValidIso9660FSReadOnly() + if err != nil { + t.Errorf("Failed to get read-only ISO9660 filesystem: %v", err) + } + tests := []testStruct{ + // error opening a directory + {"/", os.O_RDONLY, "", fmt.Errorf("Cannot open directory %s as file", "/")}, + // open non-existent file for read or read write + {"/abcdefg", os.O_RDONLY, "", fmt.Errorf("Target file %s does not exist", "/abcdefg")}, + // open file for read or read write and check contents + {"/FOO/FILENA01", os.O_RDONLY, "filename_1\n", nil}, + {"/FOO/FILENA75", os.O_RDONLY, "filename_9\n", nil}, + } + runTests(t, fs, tests) + }) + t.Run("workspace", func(t *testing.T) { + fs, err := getValidIso9660FSWorkspace() + if err != nil { + t.Errorf("Failed to get workspace: %v", err) + } + // make sure our test files exist and have necessary content + ws := fs.Workspace() + subdir := "/FOO" + os.MkdirAll(path.Join(ws, subdir), 0755) + for i 
:= 0; i <= 75; i++ { + filename := fmt.Sprintf("FILENA%02d", i) + content := fmt.Sprintf("filename_%d\n", i) + ioutil.WriteFile(path.Join(ws, subdir, filename), []byte(content), 0644) + } + tests := []testStruct{ + // error opening a directory + {"/", os.O_RDONLY, "", fmt.Errorf("Cannot open directory %s as file", "/")}, + // open non-existent file for read or read write + {"/abcdefg", os.O_RDONLY, "", fmt.Errorf("Target file %s does not exist", "/abcdefg")}, + // open file for read or read write and check contents + {"/FOO/FILENA01", os.O_RDONLY, "filename_1\n", nil}, + {"/FOO/FILENA75", os.O_RDONLY, "filename_75\n", nil}, + } + runTests(t, fs, tests) + }) + }) + + // write / create-and-write files and check contents + // *** Write - writes right after last write or read + // *** Read - reads right after last write or read + // ** WriteAt - writes at specific location in file + // ** ReadAt - reads at specific location in file + t.Run("Write", func(t *testing.T) { + t.Run("read-only", func(t *testing.T) { + flags := []int{ + os.O_CREATE, os.O_APPEND, os.O_WRONLY, os.O_RDWR, + } + fs, err := getValidIso9660FSReadOnly() + if err != nil { + t.Errorf("Failed to get read-only ISO9660 filesystem: %v", err) + } + for _, m := range flags { + _, err := fs.OpenFile("/NEWFILE", os.O_CREATE) + if err == nil { + t.Errorf("Did not return error when opening a file with read flag %v in read-only filesystem", m) + } + } + }) + t.Run("workspace", func(t *testing.T) { + fs, err := getValidIso9660FSWorkspace() + if err != nil { + t.Errorf("Failed to get workspace: %v", err) + } + baseContent := "INITIAL DATA GALORE\n" + editFile := "/EXISTS.TXT" + tests := []struct { + path string + mode int + beginning bool // true = "Seek() to beginning of file before writing"; false = "read entire file then write" + contents string + expected string + err error + }{ + // - open for create file that does not exist (write contents, check that written) + {"/abcdefg", os.O_RDWR | os.O_CREATE, false, 
"This is a test", "This is a test", nil}, + // - open for readwrite file that does exist (write contents, check that overwritten) + {editFile, os.O_RDWR, true, "This is a very long replacement string", "This is a very long replacement string", nil}, + {editFile, os.O_RDWR, true, "Two", "TwoTIAL DATA GALORE\n", nil}, + {editFile, os.O_RDWR, false, "This is a very long replacement string", "INITIAL DATA GALORE\nThis is a very long replacement string", nil}, + {editFile, os.O_RDWR, false, "Two", "INITIAL DATA GALORE\nTwo", nil}, + // - open for append file that does exist (write contents, check that appended) + {editFile, os.O_APPEND, false, "More", "", fmt.Errorf("write ")}, + {editFile, os.O_APPEND | os.O_RDWR, false, "More", "INITIAL DATA GALORE\nMore", nil}, + {editFile, os.O_APPEND, true, "More", "", fmt.Errorf("write ")}, + {editFile, os.O_APPEND | os.O_RDWR, true, "More", "INITIAL DATA GALORE\nMore", nil}, + } + for i, tt := range tests { + filepath := path.Join(fs.Workspace(), tt.path) + // remove any old file if it exists - ignore errors + _ = os.Remove(filepath) + // if the file is supposed to exist, create it and add its contents + if tt.mode&os.O_CREATE != os.O_CREATE { + ioutil.WriteFile(filepath, []byte(baseContent), 0644) + } + header := fmt.Sprintf("%d: OpenFile(%s, %s, %t)", i, tt.path, getOpenMode(tt.mode), tt.beginning) + readWriter, err := fs.OpenFile(tt.path, tt.mode) + switch { + case err != nil: + t.Errorf("%s: unexpected error: %v", header, err) + case readWriter == nil: + t.Errorf("%s: Unexpected nil output", header) + default: + // read to the end of the file + var offset int64 + _, err := readWriter.Seek(0, os.SEEK_END) + if err != nil { + t.Errorf("%s: Seek end of file gave unexpected error: %v", header, err) + continue + } + if tt.beginning { + offset, err = readWriter.Seek(0, os.SEEK_SET) + if err != nil { + t.Errorf("%s: Seek(0,os.SEEK_SET) unexpected error: %v", header, err) + continue + } + if offset != 0 { + t.Errorf("%s: 
Seek(0,os.SEEK_SET) reset to %d instead of %d", header, offset, 0) + continue + } + } + bWrite := []byte(tt.contents) + written, writeErr := readWriter.Write(bWrite) + readWriter.Seek(0, os.SEEK_SET) + bRead, readErr := ioutil.ReadAll(readWriter) + + switch { + case readErr != nil: + t.Errorf("%s: ioutil.ReadAll() unexpected error: %v", header, readErr) + case (writeErr == nil && tt.err != nil) || (writeErr != nil && tt.err == nil) || (writeErr != nil && tt.err != nil && !strings.HasPrefix(writeErr.Error(), tt.err.Error())): + t.Errorf("%s: readWriter.Write(b) mismatched errors, actual: %v , expected: %v", header, writeErr, tt.err) + case written != len(bWrite) && tt.err == nil: + t.Errorf("%s: readWriter.Write(b) wrote %d bytes instead of expected %d", header, written, len(bWrite)) + case string(bRead) != tt.expected && tt.err == nil: + t.Errorf("%s: mismatched contents, actual then expected", header) + t.Log(string(bRead)) + t.Log(tt.expected) + } + } + } + }) + }) +} + +func TestIso9660Finalize(t *testing.T) { + +} diff --git a/filesystem/iso9660/pathtable.go b/filesystem/iso9660/pathtable.go new file mode 100644 index 00000000..930eed27 --- /dev/null +++ b/filesystem/iso9660/pathtable.go @@ -0,0 +1,159 @@ +package iso9660 + +import ( + "encoding/binary" + "fmt" +) + +// pathTable represents an on-iso path table +type pathTable struct { + records []*pathTableEntry +} + +type pathTableEntry struct { + nameSize uint8 + size uint16 + extAttrLength uint8 + location uint32 + parentIndex uint16 + dirname string +} + +func (pt *pathTable) equal(b *pathTable) bool { + switch { + case (pt == nil && b != nil) || (pt != nil && b == nil): + return false + case len(pt.records) != len(b.records): + return false + default: + for i, e := range pt.records { + if *e != *b.records[i] { + return false + } + } + } + return true +} + +func (pt *pathTable) names() []string { + ret := make([]string, len(pt.records)) + for i, v := range pt.records { + ret[i] = v.dirname + } + return ret 
+} + +func (pt *pathTable) toLBytes() []byte { + b := make([]byte, 0) + for _, e := range pt.records { + name := []byte(e.dirname) + nameSize := len(name) + size := 8 + uint16(nameSize) + if nameSize%2 != 0 { + size++ + } + + b2 := make([]byte, size, size) + b2[0] = uint8(nameSize) + b2[1] = e.extAttrLength + binary.LittleEndian.PutUint32(b2[2:6], e.location) + binary.LittleEndian.PutUint16(b2[6:8], e.parentIndex) + copy(b2[8:8+nameSize], name) + if nameSize%2 != 0 { + b2[8+nameSize] = 0 + } + b = append(b, b2...) + } + return b +} +func (pt *pathTable) toMBytes() []byte { + b := make([]byte, 0) + for _, e := range pt.records { + name := []byte(e.dirname) + nameSize := len(name) + size := 8 + uint16(nameSize) + if nameSize%2 != 0 { + size++ + } + + b2 := make([]byte, size, size) + b2[0] = uint8(nameSize) + b2[1] = e.extAttrLength + binary.BigEndian.PutUint32(b2[2:6], e.location) + binary.BigEndian.PutUint16(b2[6:8], e.parentIndex) + copy(b2[8:8+nameSize], name) + if nameSize%2 != 0 { + b2[8+nameSize] = 0 + } + b = append(b, b2...) + } + return b +} + +// getLocation gets the location of the extent that contains this path +// we can get the size because the first record always points to the current directory +func (pt *pathTable) getLocation(p string) (uint32, error) { + // break path down into parts and levels + parts, err := splitPath(p) + if err != nil { + return 0, fmt.Errorf("Could not parse path: %v", err) + } + // level represents the level of the parent + var level uint16 = 1 + var location uint32 + if len(parts) == 0 { + location = pt.records[0].location + } else { + current := parts[0] + // loop through the path table until we find our entry + // we always can go forward because of the known depth ordering of path table + for i, entry := range pt.records { + // did we find a match for our current level? 
+ if entry.parentIndex == level && entry.dirname == current { + level = uint16(i) + if len(parts) > 1 { + parts = parts[1:] + } else { + // this is the final one, we found it, keep it + location = entry.location + break + } + } + } + } + return location, nil +} + +// parsePathTable load pathtable bytes into structures +func parsePathTable(b []byte) (*pathTable, error) { + totalSize := len(b) + entries := make([]*pathTableEntry, 0, 20) + for i := 0; i < totalSize; { + nameSize := uint8(b[i]) + // is it zeroes? If so, we are at the end + if nameSize == 0 { + break + } + size := 8 + uint16(nameSize) + if nameSize%2 != 0 { + size++ + } + extAttrSize := uint8(b[i+1]) + location := binary.LittleEndian.Uint32(b[i+2 : i+6]) + parent := binary.LittleEndian.Uint16(b[i+6 : i+8]) + name := string(b[i+8 : i+8+int(nameSize)]) + entry := &pathTableEntry{ + nameSize: nameSize, + size: size, + extAttrLength: extAttrSize, + location: location, + parentIndex: parent, + dirname: name, + } + entries = append(entries, entry) + i += int(size) + } + return &pathTable{ + records: entries, + }, nil +} diff --git a/filesystem/iso9660/pathtable_internal_test.go b/filesystem/iso9660/pathtable_internal_test.go new file mode 100644 index 00000000..341aa6a3 --- /dev/null +++ b/filesystem/iso9660/pathtable_internal_test.go @@ -0,0 +1,105 @@ +package iso9660 + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" +) + +func getValidPathTable() (*pathTable, []byte, []byte, error) { + blocksize := 2048 + // sector 27 - L path table + // sector 28 - M path table + pathTableLSector := 27 + pathTableMSector := 28 + // read correct bytes off of disk + input, err := ioutil.ReadFile(ISO9660File) + if err != nil { + return nil, nil, nil, fmt.Errorf("Error reading data from iso9660 test fixture %s: %v", ISO9660File, err) + } + + startL := pathTableLSector * blocksize // start of pathtable in file.iso + + // one block, since we know it is just one block + LBytes := input[startL : startL+blocksize] + + startM 
:= pathTableMSector * blocksize // start of pathtable in file.iso + + // one block, since we know it is just one block + MBytes := input[startM : startM+blocksize] + + entries := []*pathTableEntry{ + {nameSize: 0x1, size: 0xa, extAttrLength: 0x0, location: 0x12, parentIndex: 0x1, dirname: "\x00"}, + {nameSize: 0x3, size: 0xc, extAttrLength: 0x0, location: 0x14, parentIndex: 0x1, dirname: "ABC"}, + {nameSize: 0x3, size: 0xc, extAttrLength: 0x0, location: 0x15, parentIndex: 0x1, dirname: "BAR"}, + {nameSize: 0x3, size: 0xc, extAttrLength: 0x0, location: 0x16, parentIndex: 0x1, dirname: "FOO"}, + } + + return &pathTable{ + records: entries, + }, LBytes, MBytes, nil +} + +func TestPathTableToLBytes(t *testing.T) { + // the one on disk is padded to the end of the sector + b := make([]byte, 2048) + validTable, validBytes, _, _ := getValidPathTable() + b2 := validTable.toLBytes() + copy(b, b2) + + if bytes.Compare(b, validBytes) != 0 { + t.Errorf("Mismatched path table bytes. Actual then expected") + t.Logf("%#v", b) + t.Logf("%#v", validBytes) + } +} +func TestPathTableToMBytes(t *testing.T) { + // the one on disk is padded to the end of the sector + b := make([]byte, 2048) + validTable, _, validBytes, _ := getValidPathTable() + b2 := validTable.toMBytes() + copy(b, b2) + + if bytes.Compare(b, validBytes) != 0 { + t.Errorf("Mismatched path table bytes. 
Actual then expected") + t.Logf("%#v", b) + t.Logf("%#v", validBytes) + } +} + +func TestPathTableGetLocation(t *testing.T) { + table, _, _, _ := getValidPathTable() + tests := []struct { + path string + location uint32 + err error + }{ + {"/", 0x12, nil}, + {"/FOO", 0x16, nil}, + {"/nothereatall", 0x00, nil}, + } + + for _, tt := range tests { + location, err := table.getLocation(tt.path) + if (err != nil && tt.err == nil) || (err == nil && tt.err != nil) { + t.Errorf("Mismatched error, actual: %v vs expected: %v", err, tt.err) + } + if location != tt.location { + t.Errorf("Mismatched location, actual: %d vs expected: %d", location, tt.location) + } + } +} + +func TestParsePathTable(t *testing.T) { + validTable, b, _, _ := getValidPathTable() + table, err := parsePathTable(b) + if err != nil { + t.Errorf("Unexpected error when parsing path table: %v", err) + } + if !table.equal(validTable) { + t.Errorf("Mismatched path tables. Actual then expected") + t.Logf("%#v", table.records) + t.Logf("%#v", validTable.records) + } +} diff --git a/filesystem/iso9660/testdata/.gitignore b/filesystem/iso9660/testdata/.gitignore new file mode 100644 index 00000000..b070bfd0 --- /dev/null +++ b/filesystem/iso9660/testdata/.gitignore @@ -0,0 +1 @@ +isoutil diff --git a/filesystem/iso9660/testdata/README.md b/filesystem/iso9660/testdata/README.md new file mode 100644 index 00000000..cb791b3e --- /dev/null +++ b/filesystem/iso9660/testdata/README.md @@ -0,0 +1,54 @@ +# ISO9660 Test Fixtures +This directory contains test fixtures for ISO9660 filesystems. Specifically, it contains the following files: + +* `file.iso`: A 10MB iso9660 image +* `volrecords.iso`: The volume descriptor set from a real complex distribution, specifically `Ubuntu-Server 18.04.1 LTS amd64` + +To generate the `file.iso` : + + +``` +./buildtestiso.sh +``` + +We make the `\foo` directory with sufficient entries to exceed a single sector (>2048 bytes). 
This allows us to test reading directories past a sector boundary. Since each directory entry is at least ~34 bytes + the filename length, we create 10 byte filenames, for a directory entry of 44 bytes. With a sector size of 2048 bytes, we need 2048/44 = 46 entries to fill the sector and one more to get to the next one, so we make 50 entries. + +To generate the `volrecords.iso`: + +1. Download Ubuntu Server 18.04.1 LTS amd64 from http://releases.ubuntu.com/18.04.1/ubuntu-18.04.1-live-server-amd64.iso?_ga=2.268908601.917862151.1539151848-2128720580.1476045272 +2. Copy out the desired bytes: `dd if=ubuntu-18.04.1-live-server-amd64.iso of=volrecords.iso bs=2048 count=4 skip=16` + +## Utility +This directory contains a utility to output data from an ISO. It can: + +* read a directory and its entries +* read a path table + +To build it: + +``` +go build isoutil.go +``` + +To run it, run `./isoutil `. The rest of this section describes it. + +### Reading an ISO directory +To read an ISO directory: + +``` +./isoutil directory +``` + +Where: +* `` name of the ISO file, e.g. `file.iso` +* `` absolute path to the directory, e.g. `/FOO` + +### Reading an ISO path table +To read the path table: + +``` +./isoutil readpath +``` + +Where: +* `` name of the ISO file, e.g. `file.iso` diff --git a/filesystem/iso9660/testdata/buildtestiso.sh b/filesystem/iso9660/testdata/buildtestiso.sh new file mode 100644 index 00000000..0edbcc8c --- /dev/null +++ b/filesystem/iso9660/testdata/buildtestiso.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +cat << "EOF" | docker run -i --rm -v $PWD:/data alpine:3.8 +apk --update add xorriso +mkdir -p /build +cd /build +mkdir foo bar abc +dd if=/dev/zero of=bar/largefile bs=1M count=5 +dd if=/dev/zero of=abc/largefile bs=1M count=5 +i=0 +until [ $i -gt 75 ]; do echo "filename_"${i} > foo/filename_${i}; i=$(( $i+1 )); done +xorriso -as mkisofs -o /data/file.iso . 
+EOF diff --git a/filesystem/iso9660/testdata/file.iso b/filesystem/iso9660/testdata/file.iso new file mode 100644 index 0000000000000000000000000000000000000000..9492893f7c73bc0ba0b33cebec64043dc38ad65d GIT binary patch literal 11018240 zcmeF(Yi}D>0tVpm7HPW_v8&yd4+T}`UT&$cal+kkoYs)U!A?rctruv4(k>DoR>!#xw2My^bBYQ)RvV8mnRm{U2rz_iTUG)9y$WzhZs~NtmFIU$GE8+2#ozZ4|&YgSK#`|vn z`0($X^Pu4e;HBDfqf@W9I;~c%R{Y-`wRs>VyF2?-~kZ$@9zBn z0Bnj)AV7cs0RjYmA%TA$c#J*X`eWta|0%XEJX5*dEw`$BI~;tMm))y)qx`tNcCzy5 z?lJoG>V-Q;?7jXImA}O&?p_te?RfikarvNasQkR}8^!S$b^NaL2&e9xM>x`5=^ZZN zR2SOl1}a=t#n6+OFDU1(#e=~^~yIlipS!Z9xG0l z3#a4rPVXhH4TpzI+Fir$m`=x}>tBEUZE#AgQLn#H4QmLgdru6JVRdVR@HvCcuo2-E ziP6RC`QiG`M!y<1rxymX9k!(0nmbv8&2VdDy4u~>8SWRQ^%5XJfB=DCjlkstj*Tma zJn+Bw$oPIA+kaa;8Lh+B*7V=>#KWob&Ts@;Ikw&%o$ZI6l7&Maou1hp$==oU==A25 zZ$7_v{mYxzel=fU8%lrx0RjYmLg4ZNXRHr9|KEGQ`WKl0A1;UW@ZIzO$@0FRH~;UB zWbexSUz)lQAV7e?Z$sen0k0z;cK*NjRpk3U|4)i1mm8+nEdo3@tg3G zpB{5#-sFA11Mru9W3JaG#WOManRIe9cXEAFJR6grO((Zj&fRW}(5sD@+(;+qZnsA0)n-g? zrjv8GTO;&pD<-$n$+_FD5qh;9liSJUM(%cNgkD{Y$&2aa-0ju~z1oS%opkct?KY!V z8=+U1V)9b5R~vJ;+g`oiy+7Oxy}BHem(yd;-EPg$tFOl7SJTP4JFFS@N?(h~ucec7 zcUUvD=j$=~^>lLX4r_+?d?O~mkxtItVa?Eqn^IQ=01M=|-Mbn@ICHY+)_=f^Sm<7D#U+#NP6Ike{|G5M2p^4uLZD>=01rI>svojiAk z%}Q?1p1JcpGAS;{kzG!YZ0?SnHL}poS7P#&bn@ICIV(A|^VOJqHJv z{xqFDcSp`j4(k0$tJ)hzr(S_Yuf41y64iq3o5ug`A3fO=z+9Dc}Lup`&6v$C5 zrAVK>hm^x(phX`b-?}htlRQK2wYU5k61fQwAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs 
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U 
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ 
zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0_g(t zVbL$UoBrpMNjio42@oJafIy7EKL1bUe@xRAN`L?X0`USZ|9t}>ei#4{AV7dXj6lnO z-vEedxLfsb0D*Xc zmjAEifBZB6AV7csffRw3|KG^}l%}hb009C7;sy5k@8kdRvjBhq0RjYa1X}*v|C`fw zl@cI8fIzxH%YXZS(`P~b1PBlykR#CY-wyzCx~@_J1PBmF7ijr!|8M#%sGk4<0t9jd zTK?Pro6~ib5+Fc;K)OK7fBS#aXF>f02oNBUBhd2S{@c|=B|v}xfpmeE|Mvf;&w~015FkJxN1)}u{l7U~S1ADk1PG)HwEVaK zH+>e=Pk;ac0yzRL|Gfb)r|T*uK!5;&bb*%t_W!2Og8B&%AV45Tpyj{)zd2o3DFFfm z2&4jeP?f*@m1@#jkK!8AwK+Avoe{;I7 zQUU}B5J(qj`EUPk`YfoQ009C7as*ob+y9%>b(InzK!8BHK+Avof753{{R9XQAdn-_ z^4|vlbGoil0t5&UNEc}NZ~t%lEU2FV0RjYa1X}*v|C`fwl@cI8fIzxH%YXZS(`P~b z1PBlykR!0qf8YMkX}U@Y5FkJxUZCZ_{lD?U0Du4i0t8|NTK?Pr8`E@!5+Fc;K)gW9 zfBS#qhXDWq0t5)e2($oNqN_A#`42( zKi;kT&E(znuqm@)-A~8W`k_o$%ds1ln=)=6HUIReGGDFB58uCg@x0!&=r<3WvA@|I 
zmAAM3beIp*Zn1cHR)+Dj|WP!(_QtG$-7m#>X-ex8~ZZWtF#lPUg6^6VscPstLcY+UDkKYa#)}B@Nj)o4u2^J ze|!IzH^;MX>`J$|9hUu@7e}urUzYCnLw(HMZM~;2-E3CwSL=0s+IrW`hgy3%ye+$9 z^@ize|HsGs=Brh??AE2boYuSR;bDG$JnMfs{t^~>#KfSTDjbupZ-`K z?v~^E%m00#Ug`fF7|u`Xv3h)1l)smQp&VX~PW+=j&BbTK@^Kii%jRxgCt&E8W2wK^Ltkg7zRvq;-_J@{Hnsni zWwV>F^5nbcCugtAr$=Qse06wMA2;Q0cWyU!>%N<9$L9C3++OpxtMgbd_jsMl z>O1S!5A_;vhvl>{eK)<{KmK9&N*?NDm2NDHetY!aFU}sHwtV$Cj>E5gS#~%5v+}Vo z)B2QkHcn4=kJ&w7d*9~SZt}bTy7FpX&OTl5sJ!1Y>|feud*OP2D|Gh^%Pq(Dw&Qy6 zyqvxIT&uQju^aa3;;w#a)nQxCN^MxVS*@%6_A6rC{HkN&G)`00 ... \nCommands: directory pathtable", args[0]) + } + + cmd := args[1] + opts := args[2:] + switch cmd { + case "directory": + readdirCmd(opts) + case "pathtable": + readpathCmd(opts) + default: + log.Fatalf("Unknown command: %s", cmd) + } + +} + +func readpathCmd(opts []string) { + if len(opts) != 1 { + log.Fatalf("Command 'pathtable' must have exactly one arguments. Options: ") + } + filename := opts[0] + f, err := os.Open(filename) + if err != nil { + log.Fatalf("Failed to open file %s: %v", filename, err) + } + + // get the path table L location and size from the primary volume descriptor + b := make([]byte, blocksize, blocksize) + // get the primary volume descriptor + read, err := f.ReadAt(b, pvdBlock*int64(blocksize)) + if err != nil { + log.Fatalf("Error reading path table location: %v", err) + } + if read != len(b) { + log.Fatalf("Read %d bytes instead of expected %d", read, len(b)) + } + // get the location and size + size := binary.LittleEndian.Uint32(b[132 : 132+4]) + location := binary.LittleEndian.Uint32(b[140 : 140+4]) + + // read in the path table + ptBytes := make([]byte, size, size) + read, err = f.ReadAt(ptBytes, int64(location*blocksize)) + if err != nil { + log.Fatalf("Error reading path table of size from location %d: %v", size, location, err) + } + if read != len(ptBytes) { + log.Fatalf("Read %d bytes instead of expected %d", read, len(b)) + } + + // now parse the path table + // cycle through + entries := 
make([]*pathEntry, 0, 10) + // basic bytes are 9 + for i := 0; i < len(ptBytes); { + // get the size of the next record + nameSize := ptBytes[i+0] + recordSize := uint16(nameSize) + 8 + if nameSize%2 != 0 { + recordSize++ + } + + e := &pathEntry{ + nameSize: nameSize, + size: recordSize, + extAttrLength: ptBytes[i+1], + location: binary.LittleEndian.Uint32(ptBytes[i+2 : i+6]), + parentIndex: binary.LittleEndian.Uint16(ptBytes[i+6 : i+8]), + dirname: string(ptBytes[i+8 : i+8+int(nameSize)]), + } + entries = append(entries, e) + i += int(recordSize) + } + + dump(pathEntryList(entries)) +} +func readdirCmd(opts []string) { + if len(opts) != 2 { + log.Fatalf("Command 'directory' must have exactly two arguments. Options: ") + } + filename := opts[0] + p := opts[1] + f, err := os.Open(filename) + if err != nil { + log.Fatalf("Failed to open file %s: %v", filename, err) + } + + // simplistically get the root file system + b := make([]byte, blocksize, blocksize) + // get the primary volume descriptor + read, err := f.ReadAt(b, pvdBlock*int64(blocksize)) + if err != nil { + log.Fatalf("Error reading primary volume descriptor: %v", err) + } + if read != len(b) { + log.Fatalf("Read %d bytes instead of expected %d", read, len(b)) + } + // get the root directory block and size + rootDirEntryBytes := b[156 : 156+34] + // get the location and size + location := binary.LittleEndian.Uint32(rootDirEntryBytes[2 : 2+4]) + size := binary.LittleEndian.Uint32(rootDirEntryBytes[10 : 10+4]) + + // now parse the requested path and find out which one we want + parts, err := splitPath(p) + if err != nil { + log.Fatalf("Could not parse path %s: %v", p, err) + } + err = readAndProcessDirs(parts, location, size, f) + if err != nil { + log.Fatalf("Failed to process path %s: %v", p, err) + } +} + +func readAndProcessDirs(parts []string, location, size uint32, f *os.File) error { + dirs := readDirectory(location, size, f) + if len(parts) < 1 { + dump(dirEntryList(dirs)) + } else { + current, parts 
:= parts[0], parts[1:] + child := findChild(current, dirs) + if child == nil { + return fmt.Errorf("Could not find directory %s", current) + } + readAndProcessDirs(parts, child.location, child.size, f) + } + return nil +} + +func findChild(name string, entries []*dirEntry) *dirEntry { + for _, e := range entries { + if name == e.filename { + return e + } + } + return nil +} + +func dump(entries Enumerable) { + entries.Each(func(e Printable) { + val := fmt.Sprintf("%#v", e) + // strip the type header and add a , at the end + re := regexp.MustCompile(`^&main\.[^{]*`) + valPure := re.ReplaceAllString(val, ``) + fmt.Printf("%s,\n", valPure) + }) +} + +func readDirectory(location, size uint32, f *os.File) []*dirEntry { + // read the correct number of bytes, then process entries one by one + b := make([]byte, size, size) + read, err := f.ReadAt(b, int64(location)*int64(blocksize)) + if err != nil { + log.Fatalf("Failed to read directory at location %d", location) + } + if read != len(b) { + log.Fatalf("Read %d bytes instead of expected %d at location %d", read, len(b), location) + } + // cycle through + entries := make([]*dirEntry, 0, 10) + for i := 0; i < len(b); { + // get the size of the next record + recordSize := b[i+0] + // size == 0 means we have no more in this sector + if recordSize == 0 { + i += (blocksize - i%blocksize) + continue + } + recordBytes := b[i+0 : i+int(recordSize)] + i += int(recordSize) + + extAttrSize := recordBytes[1] + location := binary.LittleEndian.Uint32(recordBytes[2:6]) + size := binary.LittleEndian.Uint32(recordBytes[10:14]) + + // get the flags + isSubdirectory := recordBytes[26]&0x02 == 0x02 + + // size includes the ";1" at the end as two bytes if a file and not a directory + namelen := recordBytes[32] + + // get the filename itself + filename := string(recordBytes[33 : 33+namelen]) + + e := &dirEntry{ + recordSize: recordSize, + extAttrSize: extAttrSize, + location: location, + size: size, + isSubdirectory: isSubdirectory, + filename: 
filename, + } + entries = append(entries, e) + } + return entries +} + +func universalizePath(p string) (string, error) { + // globalize the separator + ps := strings.Replace(p, "\\", "/", 0) + if ps[0] != '/' { + return "", errors.New("Must use absolute paths") + } + return ps, nil +} +func splitPath(p string) ([]string, error) { + ps, err := universalizePath(p) + if err != nil { + return nil, err + } + // we need to split such that each one ends in "/", except possibly the last one + parts := strings.Split(ps, "/") + // eliminate empty parts + ret := make([]string, 0) + for _, sub := range parts { + if sub != "" { + ret = append(ret, sub) + } + } + return ret, nil +} diff --git a/filesystem/iso9660/testdata/volrecords.iso b/filesystem/iso9660/testdata/volrecords.iso new file mode 100644 index 0000000000000000000000000000000000000000..f6d6867c4008658ea51cd16757b028ca8d17cb0e GIT binary patch literal 8192 zcmeH~%}&BV5Xb)&!9DVkEJ4_R^j1J4pMsf73o)(agEP0rEVq$uBaej4*6TMKc7e_8#KM1_gFDc8m zD#Dh^wxna((o$tf$66Fk&n3Ux30hIi($WaO>9PCBaVBk_09U1B-`M5yophwcnjyNc z+eW&$EKGn2FoAzTAo~B;o1*OpUfUPpW4-1zyxM)+^INIE@FR632QmRBzyz286JP>+ zCvdW;{~;1u2R%$M#F)ksLi91hbUq3wR972W)c=>)^_A(b36(*H?z;XTWOYP#@D>NY z>uG_K&1Jl){!jSkq&*$JzY8A$HIGijr*l!TU{kLU85@#xtw}g@yQEQ}OuB9gMT%> 8), byte(r & 0x00ff)} + b = append(b, tmpb...) + } + return b +} + +func bytesToUCS2String(b []byte) string { + r := make([]rune, 0, 30) + // now we can iterate + for i := 0; i < len(b); { + // little endian + val := uint16(b[i])<<8 + uint16(b[i+1]) + r = append(r, rune(val)) + i += 2 + } + return string(r) +} + +// maxInt returns the larger of x or y. 
+func maxInt(x, y int) int { + if x < y { + return y + } + return x +} diff --git a/filesystem/iso9660/volume_descriptor.go b/filesystem/iso9660/volume_descriptor.go new file mode 100644 index 00000000..817cfbee --- /dev/null +++ b/filesystem/iso9660/volume_descriptor.go @@ -0,0 +1,478 @@ +package iso9660 + +import ( + "bytes" + "encoding/binary" + "fmt" + "strconv" + "time" +) + +type volumeDescriptorType uint8 + +const ( + volumeDescriptorBoot volumeDescriptorType = 0x00 + volumeDescriptorPrimary volumeDescriptorType = 0x01 + volumeDescriptorSupplementary volumeDescriptorType = 0x02 + volumeDescriptorPartition volumeDescriptorType = 0x03 + volumeDescriptorTerminator volumeDescriptorType = 0xff +) + +const ( + isoIdentifier uint64 = 0x4344303031 // string "CD001" + isoVersion uint8 = 0x01 +) + +// volumeDescriptor interface for any given type of volume descriptor +type volumeDescriptor interface { + Type() volumeDescriptorType + toBytes() []byte + equal(volumeDescriptor) bool +} + +type primaryVolumeDescriptor struct { + systemIdentifier string // length 32 bytes + volumeIdentifier string // length 32 bytes + volumeSize uint32 // in blocks + setSize uint16 + sequenceNumber uint16 + blocksize uint16 + pathTableSize uint32 + pathTableLLocation uint32 + pathTableLOptionalLocation uint32 + pathTableMLocation uint32 + pathTableMOptionalLocation uint32 + rootDirectoryEntry *directoryEntry + volumeSetIdentifier string // 128 bytes + publisherIdentifier string // 128 bytes + preparerIdentifier string // 128 bytes + applicationIdentifier string // 128 bytes + copyrightFile string // 37 bytes + abstractFile string // 37 bytes + bibliographicFile string // 37 bytes + creation time.Time + modification time.Time + expiration time.Time + effective time.Time +} + +type bootVolumeDescriptor struct { + systemIdentifier string + bootIdentifier string + data []byte // length 1977 bytes; trailing 0x00 are stripped off +} +type terminatorVolumeDescriptor struct { +} +type 
supplementaryVolumeDescriptor struct { + volumeFlags uint8 + systemIdentifier string // length 32 bytes + volumeIdentifier string // length 32 bytes + volumeSize uint64 // in bytes + escapeSequences []byte // 32 bytes + setSize uint16 + sequenceNumber uint16 + blocksize uint16 + pathTableSize uint32 + pathTableLLocation uint32 + pathTableLOptionalLocation uint32 + pathTableMLocation uint32 + pathTableMOptionalLocation uint32 + rootDirectoryEntry *directoryEntry + volumeSetIdentifier string // 128 bytes + publisherIdentifier string // 128 bytes + preparerIdentifier string // 128 bytes + applicationIdentifier string // 128 bytes + copyrightFile string // 37 bytes + abstractFile string // 37 bytes + bibliographicFile string // 37 bytes + creation time.Time + modification time.Time + expiration time.Time + effective time.Time +} +type partitionVolumeDescriptor struct { + data []byte // length 2048 bytes; trailing 0x00 are stripped off +} + +type volumeDescriptors struct { + descriptors []volumeDescriptor + primary *primaryVolumeDescriptor +} + +func (v *volumeDescriptors) equal(a *volumeDescriptors) bool { + if len(v.descriptors) != len(a.descriptors) { + return false + } + // just convert everything to bytes and compare + return bytes.Compare(v.toBytes(), a.toBytes()) == 0 +} + +func (v *volumeDescriptors) toBytes() []byte { + b := make([]byte, 0, 20) + for _, d := range v.descriptors { + b = append(b, d.toBytes()...) 
+ } + return b +} + +// primaryVolumeDescriptor +func (v *primaryVolumeDescriptor) Type() volumeDescriptorType { + return volumeDescriptorPrimary +} +func (v *primaryVolumeDescriptor) equal(a volumeDescriptor) bool { + return bytes.Compare(v.toBytes(), a.toBytes()) == 0 +} +func (v *primaryVolumeDescriptor) toBytes() []byte { + b := volumeDescriptorFirstBytes(volumeDescriptorPrimary) + + copy(b[8:40], []byte(v.systemIdentifier)) + copy(b[40:72], []byte(v.volumeIdentifier)) + binary.LittleEndian.PutUint32(b[80:84], v.volumeSize) + binary.BigEndian.PutUint32(b[84:88], v.volumeSize) + binary.LittleEndian.PutUint16(b[120:122], v.setSize) + binary.BigEndian.PutUint16(b[122:124], v.setSize) + binary.LittleEndian.PutUint16(b[124:126], v.sequenceNumber) + binary.BigEndian.PutUint16(b[126:128], v.sequenceNumber) + binary.LittleEndian.PutUint16(b[128:130], v.blocksize) + binary.BigEndian.PutUint16(b[130:132], v.blocksize) + binary.LittleEndian.PutUint32(b[132:136], v.pathTableSize) + binary.BigEndian.PutUint32(b[136:140], v.pathTableSize) + binary.LittleEndian.PutUint32(b[140:144], v.pathTableLLocation) + binary.LittleEndian.PutUint32(b[144:148], v.pathTableLOptionalLocation) + binary.BigEndian.PutUint32(b[148:152], v.pathTableMLocation) + binary.BigEndian.PutUint32(b[152:156], v.pathTableMOptionalLocation) + + rootDirEntry := make([]byte, 34) + if v.rootDirectoryEntry != nil { + rootDirEntry, _ = v.rootDirectoryEntry.toBytes() + } + copy(b[156:156+34], rootDirEntry) + + copy(b[190:190+128], []byte(v.volumeSetIdentifier)) + copy(b[318:318+128], []byte(v.publisherIdentifier)) + copy(b[446:446+128], []byte(v.preparerIdentifier)) + copy(b[574:574+128], []byte(v.applicationIdentifier)) + copy(b[702:702+37], []byte(v.copyrightFile)) + copy(b[739:739+37], []byte(v.abstractFile)) + copy(b[776:776+37], []byte(v.bibliographicFile)) + copy(b[813:813+17], timeToDecBytes(v.creation)) + copy(b[830:830+17], timeToDecBytes(v.modification)) + copy(b[847:847+17], 
timeToDecBytes(v.expiration)) + copy(b[864:864+17], timeToDecBytes(v.effective)) + + // these two are set by the standard + b[881] = 1 + b[882] = 0 + + return b +} + +// volumeDescriptorFromBytes create a volumeDescriptor struct from bytes +func volumeDescriptorFromBytes(b []byte) (volumeDescriptor, error) { + if len(b) != int(volumeDescriptorSize) { + return nil, fmt.Errorf("Cannot read volume descriptor from bytes of length %d, must be %d", len(b), volumeDescriptorSize) + } + // validate the signature + tmpb := make([]byte, 8, 8) + copy(tmpb[3:8], b[1:6]) + signature := binary.BigEndian.Uint64(tmpb) + if signature != isoIdentifier { + return nil, fmt.Errorf("Mismatched ISO identifier in Volume Descriptor. Found %x expected %x", signature, isoIdentifier) + } + // validate the version + version := b[6] + if version != isoVersion { + return nil, fmt.Errorf("Mismatched ISO version in Volume Descriptor. Found %x expected %x", version, isoVersion) + } + // get the type and data - later we will be more intelligent about this and read actual primary volume info + vdType := volumeDescriptorType(b[0]) + var vd volumeDescriptor + var err error + + switch vdType { + case volumeDescriptorPrimary: + vd, err = parsePrimaryVolumeDescriptor(b) + if err != nil { + return nil, fmt.Errorf("Unable to parse primary volume descriptor bytes: %v", err) + } + case volumeDescriptorBoot: + vd = &bootVolumeDescriptor{ + systemIdentifier: string(b[7:39]), + bootIdentifier: string(b[39:71]), + data: b[71:2048], + } + case volumeDescriptorTerminator: + vd = &terminatorVolumeDescriptor{} + case volumeDescriptorPartition: + vd = &partitionVolumeDescriptor{ + data: b[8:volumeDescriptorSize], + } + case volumeDescriptorSupplementary: + vd, err = parseSupplementaryVolumeDescriptor(b) + if err != nil { + return nil, fmt.Errorf("Unable to parse primary volume descriptor bytes: %v", err) + } + default: + return nil, fmt.Errorf("Unknown volume descriptor type %d", vdType) + } + return vd, nil +} + +func 
parsePrimaryVolumeDescriptor(b []byte) (*primaryVolumeDescriptor, error) {
	// parse a raw 2048-byte Primary Volume Descriptor sector into a struct
	blocksize := binary.LittleEndian.Uint16(b[128:130])

	creation, err := decBytesToTime(b[813 : 813+17])
	if err != nil {
		return nil, fmt.Errorf("Unable to convert creation date/time from bytes: %v", err)
	}
	modification, err := decBytesToTime(b[830 : 830+17])
	if err != nil {
		return nil, fmt.Errorf("Unable to convert modification date/time from bytes: %v", err)
	}
	// expiration can be never: 16 ASCII '0' digits followed by a zero offset byte
	nullBytes := []byte{48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 0}
	var expiration, effective time.Time
	expirationBytes := b[847 : 847+17]
	effectiveBytes := b[864 : 864+17]
	if !bytes.Equal(expirationBytes, nullBytes) {
		expiration, err = decBytesToTime(expirationBytes)
		if err != nil {
			return nil, fmt.Errorf("Unable to convert expiration date/time from bytes: %v", err)
		}
	}
	if !bytes.Equal(effectiveBytes, nullBytes) {
		effective, err = decBytesToTime(effectiveBytes)
		if err != nil {
			return nil, fmt.Errorf("Unable to convert effective date/time from bytes: %v", err)
		}
	}

	rootDirEntry, err := dirEntryFromBytes(b[156 : 156+34])
	if err != nil {
		return nil, fmt.Errorf("Unable to read root directory entry: %v", err)
	}

	return &primaryVolumeDescriptor{
		systemIdentifier:           string(b[8:40]),
		volumeIdentifier:           string(b[40:72]),
		volumeSize:                 binary.LittleEndian.Uint32(b[80:84]),
		setSize:                    binary.LittleEndian.Uint16(b[120:122]),
		sequenceNumber:             binary.LittleEndian.Uint16(b[124:126]),
		blocksize:                  blocksize,
		pathTableSize:              binary.LittleEndian.Uint32(b[132:136]),
		pathTableLLocation:         binary.LittleEndian.Uint32(b[140:144]),
		pathTableLOptionalLocation: binary.LittleEndian.Uint32(b[144:148]),
		pathTableMLocation:         binary.BigEndian.Uint32(b[148:152]),
		pathTableMOptionalLocation: binary.BigEndian.Uint32(b[152:156]),
		volumeSetIdentifier:        string(b[190 : 190+128]),
		publisherIdentifier:        string(b[318 : 318+128]),
		preparerIdentifier:         string(b[446 : 446+128]),
		applicationIdentifier:      string(b[574 : 574+128]),
		copyrightFile:              string(b[702 : 702+37]),
		abstractFile:               string(b[739 : 739+37]),
		bibliographicFile:          string(b[776 : 776+37]),
		creation:                   creation,
		modification:               modification,
		expiration:                 expiration,
		effective:                  effective,
		rootDirectoryEntry:         rootDirEntry,
	}, nil
}

// terminatorVolumeDescriptor
func (v *terminatorVolumeDescriptor) Type() volumeDescriptorType {
	return volumeDescriptorTerminator
}
func (v *terminatorVolumeDescriptor) equal(a volumeDescriptor) bool {
	return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *terminatorVolumeDescriptor) toBytes() []byte {
	return volumeDescriptorFirstBytes(volumeDescriptorTerminator)
}

// bootVolumeDescriptor
func (v *bootVolumeDescriptor) Type() volumeDescriptorType {
	return volumeDescriptorBoot
}
func (v *bootVolumeDescriptor) equal(a volumeDescriptor) bool {
	return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *bootVolumeDescriptor) toBytes() []byte {
	b := volumeDescriptorFirstBytes(volumeDescriptorBoot)
	copy(b[7:39], []byte(v.systemIdentifier))
	copy(b[39:71], []byte(v.bootIdentifier))
	copy(b[71:2048], v.data)

	return b
}

// supplementaryVolumeDescriptor
func parseSupplementaryVolumeDescriptor(b []byte) (*supplementaryVolumeDescriptor, error) {
	blocksize := binary.LittleEndian.Uint16(b[128:130])
	volumesize := binary.LittleEndian.Uint32(b[80:84])
	volumesizeBytes := uint64(blocksize) * uint64(volumesize)

	creation, err := decBytesToTime(b[813 : 813+17])
	if err != nil {
		return nil, fmt.Errorf("Unable to convert creation date/time from bytes: %v", err)
	}
	modification, err := decBytesToTime(b[830 : 830+17])
	if err != nil {
		return nil, fmt.Errorf("Unable to convert modification date/time from bytes: %v", err)
	}
	// bug fix: expiration/effective can be "never" (all-zero date field), exactly as
	// handled by parsePrimaryVolumeDescriptor above; previously this parser errored
	// out on such descriptors instead of leaving the zero time.Time
	nullBytes := []byte{48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 0}
	var expiration, effective time.Time
	expirationBytes := b[847 : 847+17]
	effectiveBytes := b[864 : 864+17]
	if !bytes.Equal(expirationBytes, nullBytes) {
		expiration, err = decBytesToTime(expirationBytes)
		if err != nil {
			return nil, fmt.Errorf("Unable to convert expiration date/time from bytes: %v", err)
		}
	}
	if !bytes.Equal(effectiveBytes, nullBytes) {
		effective, err = decBytesToTime(effectiveBytes)
		if err != nil {
			return nil, fmt.Errorf("Unable to convert effective date/time from bytes: %v", err)
		}
	}

	rootDirEntry, err := dirEntryFromBytes(b[156 : 156+34])
	if err != nil {
		return nil, fmt.Errorf("Unable to read root directory entry: %v", err)
	}

	return &supplementaryVolumeDescriptor{
		systemIdentifier:           string(b[8:40]),
		volumeIdentifier:           string(b[40:72]),
		volumeSize:                 volumesizeBytes,
		setSize:                    binary.LittleEndian.Uint16(b[120:122]),
		sequenceNumber:             binary.LittleEndian.Uint16(b[124:126]),
		blocksize:                  blocksize,
		pathTableSize:              binary.LittleEndian.Uint32(b[132:136]),
		pathTableLLocation:         binary.LittleEndian.Uint32(b[140:144]),
		pathTableLOptionalLocation: binary.LittleEndian.Uint32(b[144:148]),
		pathTableMLocation:         binary.BigEndian.Uint32(b[148:152]),
		pathTableMOptionalLocation: binary.BigEndian.Uint32(b[152:156]),
		volumeSetIdentifier:        bytesToUCS2String(b[190 : 190+128]),
		publisherIdentifier:        bytesToUCS2String(b[318 : 318+128]),
		preparerIdentifier:         bytesToUCS2String(b[446 : 446+128]),
		applicationIdentifier:      bytesToUCS2String(b[574 : 574+128]),
		copyrightFile:              bytesToUCS2String(b[702 : 702+37]),
		abstractFile:               bytesToUCS2String(b[739 : 739+37]),
		bibliographicFile:          bytesToUCS2String(b[776 : 776+37]),
		creation:                   creation,
		modification:               modification,
		expiration:                 expiration,
		effective:                  effective,
		rootDirectoryEntry:         rootDirEntry,
	}, nil
}
func (v *supplementaryVolumeDescriptor) Type() volumeDescriptorType {
	return volumeDescriptorSupplementary
}
func (v *supplementaryVolumeDescriptor) equal(a volumeDescriptor) bool {
	return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *supplementaryVolumeDescriptor) toBytes() []byte {
	b := volumeDescriptorFirstBytes(volumeDescriptorSupplementary)

	copy(b[8:40], []byte(v.systemIdentifier))
	copy(b[40:72], []byte(v.volumeIdentifier))
	// multi-byte numeric fields are stored both little- and big-endian ("both-byte order")
	blockcount := uint32(v.volumeSize / uint64(v.blocksize))
	binary.LittleEndian.PutUint32(b[80:84], blockcount)
	binary.BigEndian.PutUint32(b[84:88], blockcount)
	binary.LittleEndian.PutUint16(b[120:122], v.setSize)
	binary.BigEndian.PutUint16(b[122:124], v.setSize)
	binary.LittleEndian.PutUint16(b[124:126], v.sequenceNumber)
	binary.BigEndian.PutUint16(b[126:128], v.sequenceNumber)
	binary.LittleEndian.PutUint16(b[128:130], v.blocksize)
	binary.BigEndian.PutUint16(b[130:132], v.blocksize)
	binary.LittleEndian.PutUint32(b[132:136], v.pathTableSize)
	binary.BigEndian.PutUint32(b[136:140], v.pathTableSize)
	binary.LittleEndian.PutUint32(b[140:144], v.pathTableLLocation)
	binary.LittleEndian.PutUint32(b[144:148], v.pathTableLOptionalLocation)
	binary.BigEndian.PutUint32(b[148:152], v.pathTableMLocation)
	binary.BigEndian.PutUint32(b[152:156], v.pathTableMOptionalLocation)

	rootDirEntry, _ := v.rootDirectoryEntry.toBytes()
	copy(b[156:156+34], rootDirEntry)

	copy(b[190:190+128], ucs2StringToBytes(v.volumeSetIdentifier))
	copy(b[318:318+128], ucs2StringToBytes(v.publisherIdentifier))
	copy(b[446:446+128], ucs2StringToBytes(v.preparerIdentifier))
	copy(b[574:574+128], ucs2StringToBytes(v.applicationIdentifier))
	copy(b[702:702+37], ucs2StringToBytes(v.copyrightFile))
	copy(b[739:739+37], ucs2StringToBytes(v.abstractFile))
	copy(b[776:776+37], ucs2StringToBytes(v.bibliographicFile))
	copy(b[813:813+17], timeToDecBytes(v.creation))
	copy(b[830:830+17], timeToDecBytes(v.modification))
	copy(b[847:847+17], timeToDecBytes(v.expiration))
	copy(b[864:864+17], timeToDecBytes(v.effective))

	return b
}

// partitionVolumeDescriptor
func (v *partitionVolumeDescriptor) Type() volumeDescriptorType {
	return volumeDescriptorPartition
}
func (v *partitionVolumeDescriptor) equal(a volumeDescriptor) bool {
	return bytes.Equal(v.toBytes(), a.toBytes())
}
func (v *partitionVolumeDescriptor) toBytes() []byte {
	b :=
volumeDescriptorFirstBytes(volumeDescriptorPartition) + copy(b[7:], v.data) + return b +} + +// utilities +func volumeDescriptorFirstBytes(t volumeDescriptorType) []byte { + b := make([]byte, volumeDescriptorSize, volumeDescriptorSize) + + b[0] = byte(t) + tmpb := make([]byte, 8, 8) + binary.BigEndian.PutUint64(tmpb[:], isoIdentifier) + copy(b[1:6], tmpb[3:8]) + b[6] = isoVersion + return b +} + +func decBytesToTime(b []byte) (time.Time, error) { + year := string(b[0:4]) + month := string(b[4:6]) + date := string(b[6:8]) + hour := string(b[8:10]) + minute := string(b[10:12]) + second := string(b[12:14]) + csec := string(b[14:16]) + offset := int(int8(b[16])) + location := offset * 15 + format := "2006-01-02T15:04:05-07:00" + offsetHr := location / 60 + offsetMin := location % 60 + offsetString := "" + // if negative offset, show it just on the hour part, not twice, so we end up with "-06:30" and not "-06:-30" + switch { + case offset == 0: + offsetString = "+00:00" + case offset < 0: + offsetString = fmt.Sprintf("-%02d:%02d", -offsetHr, -offsetMin) + case offset > 0: + offsetString = fmt.Sprintf("+%02d:%02d", offsetHr, offsetMin) + } + return time.Parse(format, fmt.Sprintf("%s-%s-%sT%s:%s:%s.%s%s", year, month, date, hour, minute, second, csec, offsetString)) +} +func timeToDecBytes(t time.Time) []byte { + year := strconv.Itoa(t.Year()) + month := strconv.Itoa(int(t.Month())) + date := strconv.Itoa(t.Day()) + hour := strconv.Itoa(t.Hour()) + minute := strconv.Itoa(t.Minute()) + second := strconv.Itoa(t.Second()) + csec := strconv.Itoa(t.Nanosecond() / 1e+7) + _, offset := t.Zone() + b := make([]byte, 17, 17) + copy(b[0:4], []byte(fmt.Sprintf("%04s", year))) + copy(b[4:6], []byte(fmt.Sprintf("%02s", month))) + copy(b[6:8], []byte(fmt.Sprintf("%02s", date))) + copy(b[8:10], []byte(fmt.Sprintf("%02s", hour))) + copy(b[10:12], []byte(fmt.Sprintf("%02s", minute))) + copy(b[12:14], []byte(fmt.Sprintf("%02s", second))) + copy(b[14:16], []byte(fmt.Sprintf("%02s", csec))) + 
b[16] = byte(offset / 60 / 15) + return b +} diff --git a/filesystem/iso9660/volume_descriptor_internal_test.go b/filesystem/iso9660/volume_descriptor_internal_test.go new file mode 100644 index 00000000..34c636ed --- /dev/null +++ b/filesystem/iso9660/volume_descriptor_internal_test.go @@ -0,0 +1,281 @@ +package iso9660 + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + "time" +) + +const ( + volRecordsFile = "./testdata/volrecords.iso" +) + +var ( + timeDecBytesTests = []struct { + b []byte + rfc string + }{ + // see reference at https://wiki.osdev.org/ISO_9660#Volume_Descriptors + {append([]byte("1980010214353600"), 0), "1980-01-02T14:35:36Z"}, + {append([]byte("1995112500160700"), 8), "1995-11-25T00:16:07+02:00"}, + {append([]byte("2101063012000000"), 0xe6), "2101-06-30T12:00:00-06:30"}, + } +) + +func comparePrimaryVolumeDescriptorsIgnoreDates(a, b *primaryVolumeDescriptor) bool { + now := time.Now() + // copy values so we do not mess up the originals + c := &primaryVolumeDescriptor{} + d := &primaryVolumeDescriptor{} + *c = *a + *d = *b + + // unify fields we let be equal + c.creation = now + d.creation = now + c.effective = now + d.effective = now + c.modification = now + d.modification = now + c.expiration = now + d.expiration = now + + // cannot actually compare root directory entry since can be pointers to different things + // so we compare them separately, and then compare the rest + if *c.rootDirectoryEntry != *c.rootDirectoryEntry { + return false + } + c.rootDirectoryEntry = nil + d.rootDirectoryEntry = nil + return *c == *d +} +func comparePrimaryVolumeDescriptorsBytesIgnoreDates(a []byte, b []byte) bool { + aNull := primaryVolumeDescriptorsBytesNullDate(a) + bNull := primaryVolumeDescriptorsBytesNullDate(b) + + // we ignore the reserved areas that are unused + return bytes.Compare(aNull[:883], bNull[:883]) == 0 +} +func primaryVolumeDescriptorsBytesNullDate(a []byte) []byte { + // null the volume dates + dateLocations := []int{813, 830, 847, 
864}
	length := 17
	now := make([]byte, length)
	a1 := make([]byte, len(a))
	copy(a1, a)
	for _, i := range dateLocations {
		copy(a1[i:i+length], now)
	}
	// null the root directory entry dates
	rootEntry := a1[156 : 156+34]
	r1 := make([]byte, len(rootEntry))
	copy(r1, rootEntry)
	copy(a1[156:156+34], directoryEntryBytesNullDate(r1))
	return a1
}

// getValidVolumeDescriptors returns the expected volume descriptor structs for
// the volrecords.iso test fixture, along with the raw bytes read from it.
func getValidVolumeDescriptors() ([]volumeDescriptor, []byte, error) {
	blocksize := uint16(2048)
	// read correct bytes off of disk
	b, err := ioutil.ReadFile(volRecordsFile)
	if err != nil {
		return nil, nil, fmt.Errorf("Error reading data from volrecords test fixture %s: %v", volRecordsFile, err)
	}

	// fixture layout:
	// sector 0 - Primary Volume Descriptor
	// sector 1 - Boot Volume Descriptor
	// sector 2 - Supplemental Volume Descriptor
	// sector 3 - Volume Descriptor Set Terminator

	t1 := time.Now()
	entries := []volumeDescriptor{
		&primaryVolumeDescriptor{
			systemIdentifier: fmt.Sprintf("%32v", ""),
			volumeIdentifier: "Ubuntu-Server 18.04.1 LTS amd64 ",
			// NOTE(review): parsePrimaryVolumeDescriptor stores the raw value of
			// bytes 80-84, which per ECMA-119 is the volume size in blocks, so
			// the earlier "in bytes" label looked wrong - confirm
			volumeSize:                 415744,
			setSize:                    1,
			sequenceNumber:             1,
			blocksize:                  blocksize,
			pathTableSize:              972,
			pathTableLLocation:         114,
			pathTableLOptionalLocation: 0,
			pathTableMLocation:         115,
			pathTableMOptionalLocation: 0,
			rootDirectoryEntry:         &directoryEntry{},
			volumeSetIdentifier:        fmt.Sprintf("%128v", ""),
			publisherIdentifier:        fmt.Sprintf("%128v", ""),
			preparerIdentifier:         fmt.Sprintf("%-128v", "XORRISO-1.2.4 2012.07.20.130001, LIBISOBURN-1.2.4, LIBISOFS-1.2.4, LIBBURN-1.2.4"),
			applicationIdentifier:      fmt.Sprintf("%128v", ""),
			copyrightFile:              fmt.Sprintf("%37v", ""),
			abstractFile:               fmt.Sprintf("%37v", ""),
			bibliographicFile:          fmt.Sprintf("%37v", ""),
			creation:                   t1,
			modification:               t1,
			expiration:                 t1,
			effective:                  t1,
		},
		&bootVolumeDescriptor{
			systemIdentifier: "EL TORITO SPECIFICATION",
			bootIdentifier:   "",
			// data for el torito is first byte shows boot catalog location (block) followed by all zeros
			data:
volumeSetIdentifier:        fmt.Sprintf("%128v", ""),
		publisherIdentifier:        fmt.Sprintf("%128v", ""),
		preparerIdentifier:         fmt.Sprintf("%-128v", "XORRISO-1.4.8 2017.09.12.143001, LIBISOBURN-1.4.8, LIBISOFS-1.4.8, LIBBURN-1.4.8"),
		applicationIdentifier:      fmt.Sprintf("%128v", ""),
		copyrightFile:              fmt.Sprintf("%37v", ""),
		abstractFile:               fmt.Sprintf("%37v", ""),
		bibliographicFile:          fmt.Sprintf("%37v", ""),
		creation:                   t1,
		modification:               t1,
		expiration:                 t1,
		effective:                  t1,
	}
	// we need the root directoryEntry
	rootDirEntry := &directoryEntry{
		extAttrSize:              0,
		location:                 0x12,
		size:                     0x800,
		creation:                 t1,
		isHidden:                 false,
		isSubdirectory:           true,
		isAssociated:             false,
		hasExtendedAttrs:         false,
		hasOwnerGroupPermissions: false,
		hasMoreEntries:           false,
		volumeSequence:           1,
		filename:                 "",
		isSelf:                   true,
		filesystem:               nil,
	}
	pvd.rootDirectoryEntry = rootDirEntry
	return pvd, allBytes, nil
}

// TestDecBytesToTime verifies decoding of the 17-byte dec-datetime format.
func TestDecBytesToTime(t *testing.T) {
	for _, tt := range timeDecBytesTests {
		output, err := decBytesToTime(tt.b)
		if err != nil {
			t.Fatalf("Error parsing actual date: %v", err)
		}
		expected, err := time.Parse(time.RFC3339, tt.rfc)
		if err != nil {
			t.Fatalf("Error parsing expected date: %v", err)
		}
		if !expected.Equal(output) {
			t.Errorf("decBytesToTime(%d) expected output %v, actual %v", tt.b, expected, output)
		}
	}
}

// TestTimeToDecBytes verifies encoding back into the 17-byte dec-datetime format.
func TestTimeToDecBytes(t *testing.T) {
	for _, tt := range timeDecBytesTests {
		input, err := time.Parse(time.RFC3339, tt.rfc)
		if err != nil {
			t.Fatalf("Error parsing input date: %v", err)
		}
		b := timeToDecBytes(input)
		if !bytes.Equal(b, tt.b) {
			// bug fix: this message previously named the function "timeToBytes"
			t.Errorf("timeToDecBytes(%v) expected then actual \n% x\n% x", tt.rfc, tt.b, b)
		}
	}
}

func TestPrimaryVolumeDescriptorToBytes(t *testing.T) {
	validPvd, validBytes, err := getValidPrimaryVolumeDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	b := validPvd.toBytes()
	if !comparePrimaryVolumeDescriptorsBytesIgnoreDates(b, validBytes) {
		t.Errorf("Mismatched bytes, actual vs expected")
		t.Log(b)
		t.Log(validBytes)
	}
}

func TestParsePrimaryVolumeDescriptor(t *testing.T) {
	validPvd, validBytes, err := getValidPrimaryVolumeDescriptor()
	if err != nil {
		t.Fatal(err)
	}
	pvd, err := parsePrimaryVolumeDescriptor(validBytes)
	if err != nil {
		t.Fatalf("Error parsing primary volume descriptor: %v", err)
	}
	if !comparePrimaryVolumeDescriptorsIgnoreDates(pvd, validPvd) {
		t.Errorf("Mismatched primary volume descriptor, actual vs expected")
		t.Logf("%#v\n", pvd)
		t.Logf("%#v\n", validPvd)
	}
}

func TestPrimaryVolumeDescriptorType(t *testing.T) {
	pvd := &primaryVolumeDescriptor{}
	if pvd.Type() != volumeDescriptorPrimary {
		t.Errorf("Primary Volume Descriptor type was %v instead of expected %v", pvd.Type(), volumeDescriptorPrimary)
	}
}
diff --git a/testhelper/fileimpl.go b/testhelper/fileimpl.go index 6f5734b9..892e3c10 100644 --- a/testhelper/fileimpl.go +++ b/testhelper/fileimpl.go @@ -1,5 +1,7 @@ package testhelper +import "fmt" + type reader func(b []byte, offset int64) (int, error) type writer func(b []byte, offset int64) (int, error) @@ -19,3 +21,8 @@ func (f *FileImpl) ReadAt(b []byte, offset int64) (int, error) { func (f *FileImpl) WriteAt(b []byte, offset int64) (int, error) { return f.Writer(b, offset) } + +// Seek seek a particular offset - does not actually work +func (f *FileImpl) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("FileImpl does not implement Seek()") +} diff --git a/util/file.go b/util/file.go index 2572c77c..375da34a 100644 --- a/util/file.go +++ b/util/file.go @@ -9,4 +9,5 @@ import "io" type File interface { io.ReaderAt io.WriterAt + io.Seeker } diff --git a/util/version.go b/util/version.go new file mode 100644 index 00000000..a19fe6a8 --- /dev/null +++ b/util/version.go @@ -0,0 +1,5 @@ +package util + +const ( + AppNameVersion = "https://github.com/deitch/diskfs" +)