Skip to content

Commit

Permalink
i#2641 lib gap: handle conflict maps and the brk (#2648)
Browse files Browse the repository at this point in the history
Solves several remaining issues beyond what the first #2641 commit addressed:

Moves the libdynamorio text-data gap filling earlier to prevent our own mmaps, such as for the brk, from landing there.

Adds explicit space for the brk in the initial app mmap, to deal with top-down packing by the kernel for PIE which otherwise leaves no space for the brk.

Refactors the scheme of "conflict mmaps" used for reloading libdynamorio to avoid clobbering existing mappings.

Adds more sanity check asserts regarding the libdynamorio text-data gap.

On a reload, properly unmaps the libdynamorio .bss, even when the kernel mis-labels it as "[heap]".

Fixes #2641
  • Loading branch information
derekbruening authored and fhahn committed Dec 4, 2017
1 parent 7a72ea6 commit 3f848e7
Showing 1 changed file with 133 additions and 59 deletions.
192 changes: 133 additions & 59 deletions core/unix/loader.c
Original file line number Diff line number Diff line change
Expand Up @@ -66,10 +66,6 @@

extern size_t wcslen(const wchar_t *str); /* in string.c */

#if 1//DO NOT CHECK IN
bool vvar_in_gap;
#endif

/* Written during initialization only */
/* FIXME: i#460, the path lookup itself is a complicated process,
* so we just list possible common but in-complete paths for now.
Expand Down Expand Up @@ -257,24 +253,29 @@ os_loader_init_prologue(void)
/* If DR was loaded by system ld.so, then .dynamic *was* relocated (i#1589) */
privload_create_os_privmod_data(mod, !DYNAMO_OPTION(early_inject));
libdr_opd = (os_privmod_data_t *) mod->os_privmod_data;
if (DYNAMO_OPTION(early_inject)) {
/* i#1659: fill in the text-data segment gap to ensure no mmaps in between.
* The kernel does not do this. Our private loader does, so if we reloaded
* ourselves this is already in place, but it's expensive to query so
* we blindly clobber w/ another no-access mapping.
*/
int i;
for (i = 0; i <libdr_opd->os_data.num_segments - 1; i++) {
size_t sz = libdr_opd->os_data.segments[i+1].start -
libdr_opd->os_data.segments[i].end;
if (sz > 0) {
DEBUG_DECLARE(byte *fill =)
os_map_file(-1, &sz, 0, libdr_opd->os_data.segments[i].end,
MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE|MAP_FILE_FIXED);
ASSERT(fill != NULL);
DODEBUG({
if (DYNAMO_OPTION(early_inject)) {
/* We've already filled the gap in dynamorio_lib_gap_empty(). We just
* verify here now that we have segment info.
*/
int i;
for (i = 0; i <libdr_opd->os_data.num_segments - 1; i++) {
size_t sz = libdr_opd->os_data.segments[i+1].start -
libdr_opd->os_data.segments[i].end;
if (sz > 0) {
dr_mem_info_t info;
bool ok = query_memory_ex_from_os(libdr_opd->os_data.segments[i].end,
&info);
ASSERT(ok);
ASSERT(info.base_pc == libdr_opd->os_data.segments[i].end &&
info.size == sz &&
(info.type == DR_MEMTYPE_FREE ||
/* If we reloaded DR, our own loader filled it in. */
info.prot == DR_MEMPROT_NONE));
}
}
}
}
});
mod->externally_loaded = true;
# if defined(LINUX)/*i#1285*/ && (defined(INTERNAL) || defined(CLIENT_INTERFACE))
if (DYNAMO_OPTION(early_inject)) {
Expand Down Expand Up @@ -1510,6 +1511,25 @@ reserve_brk(app_pc post_app)
}
}

byte *
map_exe_file_and_brk(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
                     map_flags_t map_flags)
{
    /* A little hacky: we assume the MEMPROT_NONE mapping at file offset 0 is the
     * overall reservation for the whole image.  For that one mapping we over-reserve
     * by APP_BRK_GAP so the kernel's top-down PIE placement is pushed back far
     * enough to leave room for a reasonable brk, then we release the surplus tail.
     */
    size_t padded;
    byte *base;
    if (prot != MEMPROT_NONE || offs != 0) {
        /* Any other segment mapping: pass straight through. */
        return os_map_file(f, size, offs, addr, prot, map_flags);
    }
    padded = *size + APP_BRK_GAP;
    base = os_map_file(f, &padded, offs, addr, prot, map_flags);
    if (base != NULL)
        os_unmap_file(base + padded - APP_BRK_GAP, APP_BRK_GAP);
    /* Report the image-only size back to the caller (os_map_file may have
     * updated padded in place).
     */
    *size = padded - APP_BRK_GAP;
    return base;
}

/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
Expand Down Expand Up @@ -1591,6 +1611,9 @@ privload_mem_is_elf_so_header(byte *mem)
return true;
}

/* Returns false if the text-data gap is not empty. Else, fills the gap with
* no-access mappings and returns true.
*/
static bool
dynamorio_lib_gap_empty(void)
{
Expand All @@ -1602,17 +1625,33 @@ dynamorio_lib_gap_empty(void)
memquery_iter_t iter;
bool res = true;
if (memquery_iterator_start(&iter, NULL, false/*no heap*/)) {
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= get_dynamorio_dll_start() &&
iter.vm_end <= get_dynamorio_dll_end() &&
byte *dr_start = get_dynamorio_dll_start();
byte *dr_end = get_dynamorio_dll_end();
byte *gap_start = dr_start;
while (memquery_iterator_next(&iter) && iter.vm_start < dr_end) {
if (iter.vm_start >= dr_start && iter.vm_end <= dr_end &&
iter.comment[0] != '\0' &&
strstr(iter.comment, DYNAMORIO_LIBRARY_NAME) == NULL) {
/* There's a non-.bss mapping inside: probably vvar and/or vdso. */
/* There's a non-anon mapping inside: probably vvar and/or vdso. */
res = false;
break;
}
if (iter.vm_start >= get_dynamorio_dll_end())
break;
/* i#1659: fill in the text-data segment gap to ensure no mmaps in between.
* The kernel does not do this. Our private loader does, so if we reloaded
* ourselves this is already in place. We do this now rather than in
* os_loader_init_prologue() to prevent our brk mmap from landing here.
*/
if (iter.vm_start > gap_start) {
size_t sz = iter.vm_start - gap_start;
ASSERT(sz > 0);
DEBUG_DECLARE(byte *fill =)
os_map_file(-1, &sz, 0, gap_start,
MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE|MAP_FILE_FIXED);
ASSERT(fill != NULL);
gap_start = iter.vm_end;
} else if (iter.vm_end > gap_start) {
gap_start = iter.vm_end;
}
}
memquery_iterator_stop(&iter);
}
Expand Down Expand Up @@ -1655,33 +1694,63 @@ reload_dynamorio(void **init_sp, app_pc conflict_start, app_pc conflict_end)
{
elf_loader_t dr_ld;
os_privmod_data_t opd;
byte *dr_map, *temp1_map = NULL, *temp2_map = NULL;
byte *dr_map;
/* We expect at most vvar+vdso+stack+vsyscall => 5 different mappings
* even if they were all in the conflict area.
*/
#define MAX_TEMP_MAPS 16
byte *temp_map[MAX_TEMP_MAPS];
size_t temp_size[MAX_TEMP_MAPS];
uint num_temp_maps = 0, i;
memquery_iter_t iter;
app_pc entry;
byte *cur_dr_map = get_dynamorio_dll_start();
byte *cur_dr_end = get_dynamorio_dll_end();
size_t dr_size = cur_dr_end - cur_dr_map;
size_t temp1_size, temp2_size;
IF_DEBUG(bool success = )
elf_loader_read_headers(&dr_ld, get_dynamorio_library_path());
ASSERT(success);

/* XXX: have better strategy for picking base: currently we rely on
* the kernel picking an address, so we have to block out the conflicting
* region first.
* region first, avoiding any existing mappings (like vvar+vdso: i#2641).
*/
if (conflict_start < cur_dr_map) {
temp1_size = cur_dr_map - conflict_start;
temp1_map = os_map_file(-1, &temp1_size, 0, conflict_start, MEMPROT_NONE,
MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp1_map != NULL);
}
if (conflict_end > cur_dr_end) {
/* Leave room for the brk */
conflict_end += APP_BRK_GAP;
temp2_size = conflict_end - cur_dr_end;
temp2_map = os_map_file(-1, &temp2_size, 0, cur_dr_end, MEMPROT_NONE,
MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp2_map != NULL);
if (memquery_iterator_start(&iter, NULL, false/*no heap*/)) {
/* Strategy: track the leading edge ("tocover_start") of the conflict region.
* Find the next block beyond that edge so we know the safe endpoint for a
* temp mmap.
*/
byte *tocover_start = conflict_start;
while (memquery_iterator_next(&iter)) {
if (iter.vm_start > tocover_start) {
temp_map[num_temp_maps] = tocover_start;
temp_size[num_temp_maps] =
MIN(iter.vm_start, conflict_end) - tocover_start;
tocover_start = iter.vm_end;
if (temp_size[num_temp_maps] > 0) {
temp_map[num_temp_maps] =
os_map_file(-1, &temp_size[num_temp_maps], 0,
temp_map[num_temp_maps], MEMPROT_NONE,
MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp_map[num_temp_maps] != NULL);
num_temp_maps++;
}
} else if (iter.vm_end > tocover_start) {
tocover_start = iter.vm_end;
}
if (iter.vm_start >= conflict_end)
break;
}
memquery_iterator_stop(&iter);
if (tocover_start < conflict_end) {
temp_map[num_temp_maps] = tocover_start;
temp_size[num_temp_maps] = conflict_end - tocover_start;
temp_map[num_temp_maps] =
os_map_file(-1, &temp_size[num_temp_maps], 0, temp_map[num_temp_maps],
MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp_map[num_temp_maps] != NULL);
num_temp_maps++;
}
}

/* Now load the 2nd libdynamorio.so */
Expand All @@ -1699,10 +1768,8 @@ reload_dynamorio(void **init_sp, app_pc conflict_start, app_pc conflict_end)
ASSERT(opd.tls_block_size == 0);
privload_relocate_os_privmod_data(&opd, dr_map);

if (temp1_map != NULL)
os_unmap_file(temp1_map, temp1_size);
if (temp2_map != NULL)
os_unmap_file(temp2_map, temp2_size);
for (i = 0; i < num_temp_maps; i++)
os_unmap_file(temp_map[i], temp_size[i]);

entry = (app_pc)dr_ld.ehdr->e_entry + dr_ld.load_delta;
elf_loader_destroy(&dr_ld);
Expand Down Expand Up @@ -1766,7 +1833,10 @@ privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= old_libdr_base &&
iter.vm_end <= old_libdr_base + old_libdr_size &&
strstr(iter.comment, DYNAMORIO_LIBRARY_NAME) != NULL) {
(iter.comment[0] == '\0' /* .bss */ ||
/* The kernel sometimes mis-labels our .bss as "[heap]". */
strcmp(iter.comment, "[heap]") == 0 ||
strstr(iter.comment, DYNAMORIO_LIBRARY_NAME) != NULL)) {
os_unmap_file(iter.vm_start, iter.vm_end - iter.vm_start);
}
if (iter.vm_start >= old_libdr_base + old_libdr_size)
Expand Down Expand Up @@ -1802,17 +1872,20 @@ privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
/* Find range of app */
exe_map = module_vaddr_from_prog_header((app_pc)exe_ld.phdrs,
exe_ld.ehdr->e_phnum, NULL, &exe_end);
/* i#1227: on a conflict with the app: reload ourselves */
if ((get_dynamorio_dll_start() < exe_end &&
get_dynamorio_dll_end() > exe_map) ||
/* i#2641: we can't handle something in the text-data gap.
* Various parts of DR assume there's nothing inside (and we even fill the
* gap with a PROT_NONE mmap later: i#1659), so we reload to avoid it,
* under the assumption that it's rare and we're not paying this cost
* very often.
*/
!dynamorio_lib_gap_empty()) {
reload_dynamorio(sp, exe_map, exe_end);
/* i#1227: on a conflict with the app (+ room for the brk): reload ourselves */
if (get_dynamorio_dll_start() < exe_end+APP_BRK_GAP &&
get_dynamorio_dll_end() > exe_map) {
reload_dynamorio(sp, exe_map, exe_end+APP_BRK_GAP);
ASSERT_NOT_REACHED();
}
/* i#2641: we can't handle something in the text-data gap.
* Various parts of DR assume there's nothing inside (and we even fill the
* gap with a PROT_NONE mmap later: i#1659), so we reload to avoid it,
* under the assumption that it's rare and we're not paying this cost
* very often.
*/
if (!dynamorio_lib_gap_empty()) {
reload_dynamorio(sp, get_dynamorio_dll_start(), get_dynamorio_dll_end());
ASSERT_NOT_REACHED();
}

Expand All @@ -1821,7 +1894,8 @@ privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
* will be overridden if preferred base is 0
*/
true,
os_map_file,
/* ensure there's space for the brk */
map_exe_file_and_brk,
os_unmap_file, os_set_protection, 0/*!reachable*/);
apicheck(exe_map != NULL, "Failed to load application. "
"Check path and architecture.");
Expand Down

0 comments on commit 3f848e7

Please sign in to comment.