Skip to content

Commit

Permalink
libunwind, c18n: Support backtrace/exceptions.
Browse files Browse the repository at this point in the history
  • Loading branch information
dstolfa committed Mar 21, 2024
1 parent 9adf052 commit cbd89dd
Show file tree
Hide file tree
Showing 19 changed files with 892 additions and 58 deletions.
14 changes: 11 additions & 3 deletions contrib/subrepo-cheri-libunwind/include/__libunwind_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,14 @@

#define _LIBUNWIND_VERSION 15000

#if defined(_LIBUNWIND_SANDBOX_HARDENED) && !defined(_LIBUNWIND_SANDBOX_OTYPES)
#error "_LIBUNWIND_SANDBOX_HARDENED is invalid without a sandboxing mechanism"
#endif

#if defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_NO_HEAP)
#error "_LIBUNWIND_NO_HEAP cannot be used with _LIBUNWIND_SANDBOX_OTYPES"
#endif

#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
!defined(__ARM_DWARF_EH__) && !defined(__SEH__)
#define _LIBUNWIND_ARM_EHABI
Expand All @@ -20,7 +28,7 @@
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_X86_64 32
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC 112
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC64 116
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 229
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 230
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM64 95
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM 287
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_OR1K 32
Expand Down Expand Up @@ -76,11 +84,11 @@
# elif defined(__aarch64__)
# define _LIBUNWIND_TARGET_AARCH64 1
# if defined(__CHERI_PURE_CAPABILITY__)
# define _LIBUNWIND_CONTEXT_SIZE 100
# define _LIBUNWIND_CONTEXT_SIZE 102
# if defined(__SEH__)
# error "Pure-capability aarch64 SEH not supported"
# else
# define _LIBUNWIND_CURSOR_SIZE 124
# define _LIBUNWIND_CURSOR_SIZE 130
# endif
# define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO
# else
Expand Down
3 changes: 2 additions & 1 deletion contrib/subrepo-cheri-libunwind/include/libunwind.h
Original file line number Diff line number Diff line change
Expand Up @@ -678,7 +678,8 @@ enum {
UNW_ARM64_C30 = 228,
UNW_ARM64_CLR = 228,
UNW_ARM64_C31 = 229,
UNW_ARM64_CSP = 229
UNW_ARM64_CSP = 229,
UNW_ARM64_ECSP = 230,
};

// 32-bit ARM registers. Numbers match DWARF for ARM spec #3.1 Table 1.
Expand Down
26 changes: 25 additions & 1 deletion contrib/subrepo-cheri-libunwind/src/AddressSpace.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
#include "dwarf2.h"
#include "EHHeaderParser.hpp"
#include "Registers.hpp"
#include "unwind_cheri.h"

// We can no longer include C++ headers so duplicate std::min() here
template<typename T> T uw_min(T a, T b) { return a < b ? a : b; }
Expand Down Expand Up @@ -320,6 +321,9 @@ class _LIBUNWIND_HIDDEN LocalAddressSpace {
return get<v128>(addr);
}
capability_t getCapability(pint_t addr) { return get<capability_t>(addr); }
#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
static uintcap_t getUnwindSealer();
#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
__attribute__((always_inline))
uintptr_t getP(pint_t addr);
uint64_t getRegister(pint_t addr);
Expand Down Expand Up @@ -408,6 +412,25 @@ inline uint64_t LocalAddressSpace::getRegister(pint_t addr) {
#endif
}

#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
extern "C" {
/// Call into the RTLD to get a sealer capability. This sealer will be used to
/// seal information in the unwinding context if _LIBUNWIND_SANDBOX_HARDENED is
/// specified.
uintptr_t _rtld_unw_getsealer(void);
uintptr_t __rtld_unw_getsealer();
// Fallback definition used when no runtime linker provides a sealer: returns
// the sentinel value -1, which callers must treat as "no sealer available".
_LIBUNWIND_HIDDEN uintptr_t __rtld_unw_getsealer() {
  return (uintptr_t)-1;
}
// Weak alias: a compartmentalizing RTLD that defines a strong
// _rtld_unw_getsealer overrides this fallback at link time.
_LIBUNWIND_WEAK_ALIAS(__rtld_unw_getsealer, _rtld_unw_getsealer)
}

/// C++ wrapper for calling into RTLD.
/// Returns the RTLD-provided sealer capability, or (uintcap_t)-1 when the
/// fallback above is in effect.
inline uintcap_t LocalAddressSpace::getUnwindSealer() {
  return _rtld_unw_getsealer();
}
#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES

/// Read a ULEB128 into a 64-bit word.
inline uint64_t LocalAddressSpace::getULEB128(pint_t &addr, pint_t end) {
const uint8_t *p = (uint8_t *)addr;
Expand Down Expand Up @@ -930,7 +953,8 @@ inline bool LocalAddressSpace::findUnwindSections(pc_t targetAddr,
return true;
#elif defined(_LIBUNWIND_USE_DL_ITERATE_PHDR)
dl_iterate_cb_data cb_data = {this, &info, targetAddr};
CHERI_DBG("Calling dl_iterate_phdr()\n");
CHERI_DBG("Calling dl_iterate_phdr(0x%jx)\n",
(uintmax_t)targetAddr.address());
int found = dl_iterate_phdr(findUnwindSectionsByPhdr, &cb_data);
return static_cast<bool>(found);
#endif
Expand Down
1 change: 1 addition & 0 deletions contrib/subrepo-cheri-libunwind/src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ set(LIBUNWIND_HEADERS
Registers.hpp
RWMutex.hpp
Unwind-EHABI.h
unwind_cheri.h
UnwindCursor.hpp
../include/libunwind.h
../include/unwind.h
Expand Down
184 changes: 184 additions & 0 deletions contrib/subrepo-cheri-libunwind/src/CompartmentInfo.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//
// Abstracts unwind information when used with a compartmentalizing runtime
// linker.
//
//===----------------------------------------------------------------------===//

#ifndef __COMPARTMENT_INFO_HPP__
#define __COMPARTMENT_INFO_HPP__

#include "AddressSpace.hpp"

#if defined(__CHERI_PURE_CAPABILITY__)
namespace libunwind {
template <typename A>
class _LIBUNWIND_HIDDEN CompartmentInfo {
  typedef typename A::capability_t capability_t;

public:
  static const uintcap_t kInvalidRCSP = (uintcap_t)0;
  // Per-architecture trusted stack frame layout.
#if defined(_LIBUNWIND_TARGET_AARCH64)
  static const uint32_t kNewSPOffset = 48;
  static const uint32_t kNextOffset = 32;
  static const uint32_t kFPOffset = 0;
  static const uint32_t kCalleeSavedOffset = 80;
  static const uint32_t kCalleeSavedCount = 10;
  static const uint32_t kCalleeSavedSize = 16;
  static const uint32_t kReturnAddressOffset = 40;
  static const uint32_t kPCOffset = 16;
  // kCalleeSavedCount - 1 because kCalleeSavedOffset is the first one.
  static const uint32_t kTrustedFrameSize =
      kCalleeSavedOffset + (kCalleeSavedCount - 1) * kCalleeSavedSize;
#else
#error "Unsupported architecture for compartmentalization"
#endif

private:
  // CompartmentInfo understands how compartments are represented when running
  // with a sandboxing runtime linker and is responsible for simulating how the
  // runtime linker juggles restricted stacks.
  //
  // If _LIBUNWIND_SANDBOX_HARDENED is specified, the table pointer will always
  // be sealed so that the caller cannot access it.
  //
  // XXX: Have this call into rtld to get a notion of a "compartment ID" as
  // opposed to understanding how rtld juggles stacks under the hood?
  struct StackTableEntry {
    uintcap_t key = kInvalidRCSP;
    uintcap_t value = kInvalidRCSP;
    StackTableEntry *next = nullptr;
  };
  static const uint32_t kStackTableSize = 1 << 10; // XXX: Is this a good size?
  static const uint32_t kStackTableMask = kStackTableSize - 1;
  // stackTable : start of restricted stack -> top of next caller's stack
  StackTableEntry *stackTable;
  A &addressSpace;
#if defined(_LIBUNWIND_SANDBOX_OTYPES)
public:
  CompartmentInfo(A &as) : addressSpace(as) {
    stackTable =
        (StackTableEntry *)malloc(kStackTableSize * sizeof(StackTableEntry));
    // FIXME: Calling abort() here might be unacceptable. In fact, allocating
    // memory here in general might be unacceptable, especially on targets
    // that don't need to use this.
    if (stackTable == nullptr)
      _LIBUNWIND_ABORT("failed to allocate stack table in CompartmentInfo");
    // Zero-filling is sufficient initialization: StackTableEntry's "empty"
    // state (kInvalidRCSP keys/values, null next pointer) is all-zeroes.
    memset(stackTable, 0, sizeof(StackTableEntry) * kStackTableSize);
#ifdef _LIBUNWIND_SANDBOX_HARDENED
    capability_t sealer = addressSpace.getUnwindSealer();
    if (sealer != addressSpace.to_capability_t(-1))
      stackTable = __builtin_cheri_seal(stackTable, sealer);
#endif
    CHERI_DBG("allocated new stack table: %#p\n", (void *)stackTable);
  }
  ~CompartmentInfo() { reset(); }

private:
  /// Return a readable/writable view of the stack table. Under
  /// _LIBUNWIND_SANDBOX_HARDENED the member pointer is stored sealed and must
  /// be unsealed before each use; otherwise it is stored as-is.
  ///
  /// NOTE(review): the constructor only seals stackTable when the sealer is
  /// valid (!= -1), but this helper unseals unconditionally under
  /// _LIBUNWIND_SANDBOX_HARDENED -- confirm rtld always provides a sealer in
  /// hardened configurations.
  StackTableEntry *unsealedStackTable() {
#ifdef _LIBUNWIND_SANDBOX_HARDENED
    capability_t sealer = addressSpace.getUnwindSealer();
    return __builtin_cheri_unseal(stackTable, sealer);
#else
    return stackTable;
#endif
  }
  /// Map a restricted-stack capability to a bucket index by masking its
  /// address with the (power-of-two) table size.
  static uint32_t stackHash(uintcap_t stack) {
    ptraddr_t stackAddr = (ptraddr_t)stack;
    return stackAddr & kStackTableMask;
  }
  /// Find the table entry whose key is startOfRCSP, walking the bucket's
  /// collision chain. Returns nullptr when no such entry exists.
  StackTableEntry *findStack(uintcap_t startOfRCSP) {
    uint32_t hashIndex = stackHash(startOfRCSP);
    CHERI_DBG("findStack(): hashIndex = %u\n", hashIndex);
    assert(hashIndex < kStackTableSize);
    StackTableEntry *entry = &unsealedStackTable()[hashIndex];
    CHERI_DBG("findStack(): looking for 0x%lx\n", (ptraddr_t)startOfRCSP);
    while (entry && entry->key != startOfRCSP) {
      CHERI_DBG("findStack(): entry->key = 0x%lx\n", (ptraddr_t)entry->key);
      entry = entry->next;
    }
    return entry;
  }
  /// Insert a new (restricted stack base -> caller stack top) mapping.
  /// Aborts if a chained entry cannot be allocated.
  bool insertNewStack(uintcap_t k, uintcap_t v) {
    uint32_t hashIndex = stackHash(k);
    StackTableEntry *entry = &unsealedStackTable()[hashIndex];
    if (entry->key == kInvalidRCSP) {
      // The bucket head is unused; claim it in place, no allocation needed.
      entry->key = k;
      entry->value = v;
      return true;
    }
    // Bucket occupied: append a heap-allocated entry to the end of the chain.
    while (entry->next)
      entry = entry->next;
    StackTableEntry *newEntry =
        (StackTableEntry *)malloc(sizeof(StackTableEntry));
    // Check the allocation that can actually fail (the original checked the
    // in-table bucket pointer instead, which is never null).
    if (newEntry == nullptr)
      _LIBUNWIND_ABORT("failed to allocate a stack table entry");
    newEntry->key = k;
    newEntry->value = v;
    newEntry->next = nullptr;
    entry->next = newEntry;
    CHERI_DBG("insertNewStack(): 0x%lx ==> 0x%lx\n", (ptraddr_t)k,
              (ptraddr_t)v);
    return true;
  }

public:
  /// Return the restricted stack to unwind to for the stack that starts at
  /// startOfRCSP, and record oldCallerSPTop as the value to hand out on the
  /// next query. On first sight of a stack, records the mapping and returns
  /// nextRCSP (the value the runtime linker gave us) unchanged.
  uintcap_t getAndUpdateRestrictedStack(uintcap_t startOfRCSP,
                                        uintcap_t oldCallerSPTop,
                                        uintcap_t nextRCSP) {
    CHERI_DBG("getAndUpdateRestrictedStack(0x%lx, 0x%lx, 0x%lx)\n",
              (ptraddr_t)startOfRCSP, (ptraddr_t)oldCallerSPTop,
              (ptraddr_t)nextRCSP);
    StackTableEntry *entry = findStack(startOfRCSP);
    if (entry == nullptr) {
      // If there is no entry in our table for a given restricted stack, we
      // will simply return nextRCSP which the runtime linker gave us.
      CHERI_DBG("stack not found in compartment info, adding 0x%lx ==> 0x%lx\n",
                (ptraddr_t)startOfRCSP, (ptraddr_t)oldCallerSPTop);
      insertNewStack(startOfRCSP, oldCallerSPTop);
      return nextRCSP;
    }
    // There's already an entry for the restricted stack. Return the next
    // restricted stack we expect to unwind or resume from and update the value
    // to the next one.
    uintcap_t stackToReturn = entry->value;
    entry->value = oldCallerSPTop;
    CHERI_DBG("getAndUpdateRestrictedStack(): return 0x%lx\n",
              (ptraddr_t)stackToReturn);
    return stackToReturn;
  }
  /// Free every chained entry and the table itself. The table is not
  /// reallocated, so the object must not be used again after reset(); the
  /// null-out below makes a second reset() (e.g. via the destructor) a no-op
  /// instead of a double free.
  void reset(void) {
    if (stackTable == nullptr)
      return;
    StackTableEntry *table = unsealedStackTable();
    for (size_t i = 0; i < kStackTableSize; i++) {
      StackTableEntry *heapEntry = table[i].next;
      while (heapEntry) {
        StackTableEntry *temp = heapEntry;
        heapEntry = heapEntry->next;
        free(temp);
      }
    }
    free(table);
    stackTable = nullptr;
  }
#else  // _LIBUNWIND_SANDBOX_OTYPES
public:
  CompartmentInfo(A &as) : addressSpace(as) {}
#endif // _LIBUNWIND_SANDBOX_OTYPES
};
} // namespace libunwind
#endif // __CHERI_PURE_CAPABILITY__
#endif // __COMPARTMENT_INFO_HPP__
Loading

0 comments on commit cbd89dd

Please sign in to comment.