Skip to content

Commit

Permalink
Merge pull request #30 from crmulliner/master
Browse files Browse the repository at this point in the history
added list_files
  • Loading branch information
jrspruitt authored Apr 16, 2019
2 parents 0a57bed + 0c62147 commit 7079dd3
Show file tree
Hide file tree
Showing 4 changed files with 362 additions and 1 deletion.
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,17 @@ extract the contents. If file includes special files, you will need to run as
root or sudo for it to create these files. Without it, it'll skip them and show a
warning that these files were not created.

## List/Copy Files:
ubireader_list_files [options] path/to/file

The script accepts a file with UBI or UBIFS data in it, so should work with a NAND
dump. It will search for the first occurrence of UBI or UBIFS data and treat it as
a UBIFS. To list files supply the path to list (-P, --path), e.g. "-P /" to list
the filesystems root directory. To copy a file from the filesystem to a local directory
supply the source path (-C, --copy) and the destination path (-D, --copy-dest),
e.g. -C /etc/passwd -D . (extract /etc/passwd from the UBIFS image and copy it to
local directory).

## Extracting Images:
ubireader_extract_images [options] path/to/file

Expand Down
169 changes: 169 additions & 0 deletions scripts/ubireader_list_files
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
#!/usr/bin/env python

#############################################################
# ubi_reader
# (C) Collin Mulliner based on Jason Pruitt's ubireader_extract_images
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################

import os
import sys
import time
import argparse

from ubireader import settings
from ubireader.ubi import ubi
from ubireader.ubi.defines import UBI_EC_HDR_MAGIC
from ubireader.ubifs import ubifs
from ubireader.ubifs.list import list_files, copy_file
from ubireader.ubifs.defines import UBIFS_NODE_MAGIC
from ubireader.ubi_io import ubi_file, leb_virtual_file
from ubireader.debug import error, log
from ubireader.utils import guess_filetype, guess_start_offset, guess_leb_size, guess_peb_size

if __name__ == '__main__':
    description = 'List and Extract files of a UBI or UBIFS image.'
    usage = 'ubireader_list_files [options] filepath'
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('-l', '--log', action='store_true', dest='log',
                        help='Print extraction information to screen.')

    parser.add_argument('-v', '--verbose-log', action='store_true', dest='verbose',
                        help='Prints nearly everything about anything to screen.')

    # NOTE: -p and -e intentionally share dest='block_size'; only one of
    # PEB (UBI) or LEB (UBIFS) size is meaningful for a given input file.
    parser.add_argument('-p', '--peb-size', type=int, dest='block_size',
                        help='Specify PEB size. (UBI Only)')

    parser.add_argument('-e', '--leb-size', type=int, dest='block_size',
                        help='Specify LEB size. (UBIFS Only)')

    parser.add_argument('-s', '--start-offset', type=int, dest='start_offset',
                        help='Specify offset of UBI/UBIFS data in file. (default: 0)')

    parser.add_argument('-n', '--end-offset', type=int, dest='end_offset',
                        help='Specify end offset of UBI/UBIFS data in file.')

    parser.add_argument('-g', '--guess-offset', type=int, dest='guess_offset',
                        help='Specify offset to start guessing where UBI data is in file. (default: 0)')

    parser.add_argument('-w', '--warn-only-block-read-errors', action='store_true', dest='warn_only_block_read_errors',
                        help='Attempts to continue extracting files even with bad block reads. Some data will be missing or corrupted! (default: False)')

    parser.add_argument('-i', '--ignore-block-header-errors', action='store_true', dest='ignore_block_header_errors',
                        help='Forces unused and error containing blocks to be included and also displayed with log/verbose. (default: False)')

    parser.add_argument('-P', '--path', dest='listpath',
                        help='Path to list.')

    parser.add_argument('-C', '--copy', dest='copyfile',
                        help='File to Copy.')

    parser.add_argument('-D', '--copy-dest', dest='copyfiledest',
                        help='Copy Destination.')

    parser.add_argument('filepath', help='UBI/UBIFS image file.')

    # Show the full help instead of a terse argparse error when run bare.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    settings.logging_on = args.log
    settings.logging_on_verbose = args.verbose
    settings.warn_only_block_read_errors = args.warn_only_block_read_errors
    settings.ignore_block_header_errors = args.ignore_block_header_errors

    path = args.filepath
    if not os.path.exists(path):
        parser.error("File path doesn't exist.")

    # Use 'is not None' so an explicit '-s 0' is honored instead of
    # silently falling through to offset guessing (0 is falsy).
    if args.start_offset is not None:
        start_offset = args.start_offset
    elif args.guess_offset:
        start_offset = guess_start_offset(path, args.guess_offset)
    else:
        start_offset = guess_start_offset(path)

    # None means "read to the end of the file".
    end_offset = args.end_offset

    filetype = guess_filetype(path, start_offset)
    if not filetype:
        parser.error('Could not determine file type.')

    # Guess the block size from the image when it was not given on the
    # command line; block_size stays None (and triggers the error below)
    # if the filetype matches neither branch, instead of being unbound.
    block_size = args.block_size
    if block_size is None:
        if filetype == UBI_EC_HDR_MAGIC:
            block_size = guess_peb_size(path)
        elif filetype == UBIFS_NODE_MAGIC:
            block_size = guess_leb_size(path)

    if not block_size:
        parser.error('Block size could not be determined.')

    # Create file object backed by the image file.
    ufile_obj = ubi_file(path, block_size, start_offset, end_offset)

    if filetype == UBI_EC_HDR_MAGIC:
        # UBI image: walk every image and volume, treating each volume
        # as an independent UBIFS filesystem.
        ubi_obj = ubi(ufile_obj)

        for image in ubi_obj.images:
            for volume in image.volumes:
                # Get blocks associated with this volume.
                vol_blocks = image.volumes[volume].get_blocks(ubi_obj.blocks)

                # Skip volume if empty.
                if not len(vol_blocks):
                    continue

                # Create LEB backed virtual file with volume blocks.
                # Necessary to prevent having to load entire UBI image
                # into memory.
                lebv_file = leb_virtual_file(ubi_obj, vol_blocks)

                # Create UBIFS object.
                ubifs_obj = ubifs(lebv_file)

                if args.listpath:
                    list_files(ubifs_obj, args.listpath)
                if args.copyfile and args.copyfiledest:
                    copy_file(ubifs_obj, args.copyfile, args.copyfiledest)

    elif filetype == UBIFS_NODE_MAGIC:
        # Bare UBIFS image: operate on the file object directly.
        ubifs_obj = ubifs(ufile_obj)

        if args.listpath:
            list_files(ubifs_obj, args.listpath)
        if args.copyfile and args.copyfiledest:
            copy_file(ubifs_obj, args.copyfile, args.copyfiledest)

    else:
        print('Something went wrong to get here.')
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from setuptools import setup, find_packages

version = '0.6.2'
version = '0.6.3'

setup(
name='ubi_reader',
Expand All @@ -20,6 +20,7 @@
packages = find_packages(),
scripts=['scripts/ubireader_display_info',
'scripts/ubireader_extract_files',
'scripts/ubireader_list_files',
'scripts/ubireader_extract_images',
'scripts/ubireader_utils_info'
],
Expand Down
180 changes: 180 additions & 0 deletions ubireader/ubifs/list.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
#!/usr/bin/env python
#############################################################
# ubi_reader/ubifs
# (C) Collin Mulliner based on Jason Pruitt's output.py

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################

import os
import struct

import time
from ubireader import settings
from ubireader.ubifs.defines import *
from ubireader.ubifs import walk
from ubireader.ubifs.misc import decompress
from ubireader.debug import error, log, verbose_log


def list_files(ubifs, list_path):
    """List the contents of a directory inside a UBIFS image.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Str:list_path -- Absolute path (inside the image) of the directory
                     to list, e.g. '/' or '/etc'.

    Prints one line per directory entry via print_dent(); errors are
    reported through error() rather than raised to the caller.
    """
    # Split the path and drop empty components ('' from leading or
    # doubled slashes), so '/', '//etc' etc. resolve correctly.
    pnames = [part for part in list_path.split('/') if part]

    try:
        inodes = {}
        bad_blocks = []

        # Build the inode table from the index tree rooted at the master node.
        walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes, bad_blocks)

        if len(inodes) < 2:
            raise Exception('No inodes found')

        inum = find_dir(inodes, 1, pnames, 0)

        # Silently return when the path does not resolve or has no entries,
        # matching the original best-effort behavior.
        if inum is None or 'dent' not in inodes[inum]:
            return

        for dent in inodes[inum]['dent']:
            print_dent(ubifs, inodes, dent, longts=False)

        if bad_blocks:
            error(list_files, 'Warning', 'Data may be missing or corrupted, bad blocks, LEB [%s]' % ','.join(map(str, bad_blocks)))

    except Exception as e:
        error(list_files, 'Error', '%s' % e)


def copy_file(ubifs, filepath, destpath):
    """Copy one file out of a UBIFS image to the local filesystem.

    Arguments:
    Obj:ubifs    -- UBIFS object.
    Str:filepath -- Absolute path (inside the image) of the file to copy.
    Str:destpath -- Local destination; if it is an existing directory the
                    source filename is appended to it.

    Returns:
    Bool -- True if the file was found and written, False otherwise.
    """
    # Split the path and drop empty components from leading/doubled slashes.
    pnames = [part for part in filepath.split('/') if part]

    # Guard against '' or '/' — there is no filename to copy, and
    # indexing an empty list would raise IndexError.
    if not pnames:
        return False

    filename = pnames[-1]
    pnames = pnames[:-1]

    inodes = {}
    bad_blocks = []

    # Build the inode table from the index tree rooted at the master node.
    walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes, bad_blocks)

    if len(inodes) < 2:
        return False

    inum = find_dir(inodes, 1, pnames, 0)

    if inum is None or 'dent' not in inodes[inum]:
        return False

    for dent in inodes[inum]['dent']:
        if dent.name == filename:
            filedata = _process_reg_file(ubifs, inodes[dent.inum], filepath)
            if os.path.isdir(destpath):
                destpath = os.path.join(destpath, filename)
            with open(destpath, 'wb') as f:
                f.write(filedata)
            return True
    return False


def find_dir(inodes, inum, names, idx):
    """Resolve a path (list of name components) to an inode number.

    Recursively walks the directory entries starting at inode `inum`,
    matching one path component per level.

    Arguments:
    Dict:inodes -- Mapping of inode number -> inode entry.
    Int:inum    -- Inode number of the directory to search.
    List:names  -- Path components to resolve (no empty strings).
    Int:idx     -- Index into `names` of the component to match now.

    Returns:
    Int -- Inode number of the resolved entry; 1 (the root inode) for an
           empty path; None if a component is not found or an intermediate
           inode has no directory entries.
    """
    if not names:
        return 1  # Empty path refers to the filesystem root.

    # .get() guards inodes whose entry has no 'dent' list (e.g. a path
    # component that is actually a regular file), which would otherwise
    # raise KeyError.
    for dent in inodes[inum].get('dent', []):
        if dent.name == names[idx]:
            if idx + 1 == len(names):
                return dent.inum
            return find_dir(inodes, dent.inum, names, idx + 1)
    return None


def print_dent(ubifs, inodes, dent_node, long=True, longts=False):
    """Print one directory entry, either as a bare name or in long form.

    Arguments:
    Obj:ubifs      -- UBIFS object (used for file length calculation).
    Dict:inodes    -- Mapping of inode number -> inode entry.
    Obj:dent_node  -- Directory entry node to print.
    Bool:long      -- When True, print an ls -l style line; otherwise
                      just the entry name.
    Bool:longts    -- When True, print the raw mtime seconds instead of
                      a formatted timestamp.
    """
    inode = inodes[dent_node.inum]

    if not long:
        print(dent_node.name)
        return

    ino = inode['ino']
    size = file_leng(ubifs, inode)

    # Symlinks get an " -> target" suffix, like ls -l.
    link_suffix = ''
    if dent_node.type == UBIFS_ITYPE_LNK:
        link_suffix = ' -> ' + ino.data.decode('utf-8')

    if longts:
        mtime = ino.mtime_sec
    else:
        mtime = time.strftime('%b %d %H:%M', time.gmtime(ino.mtime_sec))

    print('%6o %2d %s %s %7d %s %s%s' % (ino.mode, ino.nlink, ino.uid, ino.gid, size, mtime, dent_node.name, link_suffix))


def file_leng(ubifs, inode):
    """Compute the byte length of a file from its UBIFS data nodes.

    Gaps in the khash (block number) sequence are counted as one full
    zero-filled UBIFS_BLOCK_SIZE block each, matching how
    _process_reg_file reconstructs file contents.

    Bug fix vs. the original: last_khash is now advanced after every
    node (as _process_reg_file does); previously it was only advanced
    inside the gap loop, so every consecutive node after the first was
    mis-detected as a gap and an extra UBIFS_BLOCK_SIZE was added.

    Arguments:
    Obj:ubifs  -- UBIFS object (unused; kept for interface symmetry).
    Dict:inode -- Inode entry with an optional 'data' node list.

    Returns:
    Int -- Total file length in bytes; 0 if there are no data nodes
           (also covers an empty 'data' list, which previously raised
           IndexError).
    """
    data_nodes = inode.get('data')
    if not data_nodes:
        return 0

    sorted_data = sorted(data_nodes, key=lambda node: node.key['khash'])
    length = 0
    last_khash = sorted_data[0].key['khash'] - 1

    for node in sorted_data:
        # Missing blocks in the sequence count as full sparse blocks.
        gap = node.key['khash'] - last_khash - 1
        if gap > 0:
            length += gap * UBIFS_BLOCK_SIZE
        length += node.size
        last_khash = node.key['khash']

    return length


def _process_reg_file(ubifs, inode, path):
    """Reconstruct the contents of a regular file from its UBIFS data nodes.

    Arguments:
    Obj:ubifs  -- UBIFS object; its backing file is seeked/read directly.
    Dict:inode -- Inode entry with an 'ino' node and an optional 'data'
                  node list.
    Str:path   -- Path of the file, used only in log/error messages.

    Returns:
    Bytes -- The assembled file contents. On a read/decompress error the
             partial buffer assembled so far is kept (logged as a warning,
             not raised), then zero-padded to the inode's recorded size.
    """
    try:
        buf = b''
        if 'data' in inode:
            compr_type = 0
            # Data nodes are keyed by block number ('khash'); sort so the
            # blocks are assembled in file order.
            sorted_data = sorted(inode['data'], key=lambda x: x.key['khash'])
            # Seed one below the first block so the gap check below sees a
            # difference of exactly 1 for a contiguous sequence.
            last_khash = sorted_data[0].key['khash']-1

            for data in sorted_data:

                # If data nodes are missing in sequence, fill in blanks
                # with \x00 * UBIFS_BLOCK_SIZE
                if data.key['khash'] - last_khash != 1:
                    while 1 != (data.key['khash'] - last_khash):
                        buf += b'\x00'*UBIFS_BLOCK_SIZE
                        last_khash += 1

                compr_type = data.compr_type
                # Read the compressed payload straight from the image and
                # inflate it to the node's uncompressed size.
                ubifs.file.seek(data.offset)
                d = ubifs.file.read(data.compr_len)
                buf += decompress(compr_type, data.size, d)
                last_khash = data.key['khash']
                verbose_log(_process_reg_file, 'ino num: %s, compression: %s, path: %s' % (inode['ino'].key['ino_num'], compr_type, path))

    except Exception as e:
        # Best effort: log and fall through with whatever was assembled so far.
        error(_process_reg_file, 'Warn', 'inode num:%s :%s' % (inode['ino'].key['ino_num'], e))

    # Pad end of file with \x00 if needed.
    if inode['ino'].size > len(buf):
        buf += b'\x00' * (inode['ino'].size - len(buf))

    return buf

0 comments on commit 7079dd3

Please sign in to comment.