diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..7d003eeb
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+Sources/_CJavaScriptEventLoop/include/swift/** linguist-vendored
+Sources/_CJavaScriptEventLoop/include/llvm/** linguist-vendored
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 55e793e7..3d4fd5b1 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -10,9 +10,8 @@ jobs:
       matrix:
         os: [macos-10.15, macos-11, ubuntu-18.04, ubuntu-20.04]
         toolchain:
-          - wasm-5.3.1-RELEASE
-          - wasm-5.4.0-RELEASE
-          - wasm-5.5-SNAPSHOT-2021-09-01-a
+          - wasm-DEVELOPMENT-SNAPSHOT-2021-09-29-a
+          - wasm-5.5-SNAPSHOT-2021-09-26-a
     runs-on: ${{ matrix.os }}
     steps:
       - name: Checkout
diff --git a/.swift-version b/.swift-version
index 8fc00360..de980717 100644
--- a/.swift-version
+++ b/.swift-version
@@ -1 +1 @@
-wasm-5.3.0-RELEASE
+wasm-5.5-SNAPSHOT-2021-09-26-a
diff --git a/Example/JavaScriptKitExample/Package.swift b/Example/JavaScriptKitExample/Package.swift
index 23fe1071..b850d081 100644
--- a/Example/JavaScriptKitExample/Package.swift
+++ b/Example/JavaScriptKitExample/Package.swift
@@ -1,4 +1,4 @@
-// swift-tools-version:5.2
+// swift-tools-version:5.5
 
 import PackageDescription
 
@@ -10,5 +10,13 @@ let package = Package(
         ),
     ],
     dependencies: [.package(name: "JavaScriptKit", path: "../../")],
-    targets: [.target(name: "JavaScriptKitExample", dependencies: ["JavaScriptKit"])]
+    targets: [
+        .target(
+            name: "JavaScriptKitExample",
+            dependencies: [
+                .product(name: "JavaScriptKit", package: "JavaScriptKit"),
+                .product(name: "JavaScriptEventLoop", package: "JavaScriptKit"),
+            ]
+        ),
+    ]
 )
diff --git a/Example/JavaScriptKitExample/Sources/JavaScriptKitExample/main.swift b/Example/JavaScriptKitExample/Sources/JavaScriptKitExample/main.swift
index cbd11acf..e023b47f 100644
--- a/Example/JavaScriptKitExample/Sources/JavaScriptKitExample/main.swift
+++ b/Example/JavaScriptKitExample/Sources/JavaScriptKitExample/main.swift
@@ -1,4 +1,7 @@
 import JavaScriptKit
+import JavaScriptEventLoop
+
+JavaScriptEventLoop.install()
 
 let alert = JSObject.global.alert.function!
 let document = JSObject.global.document
@@ -15,3 +18,15 @@ let listener = JSClosure { _ in
 buttonElement.onclick = .object(listener)
 
 _ = document.body.appendChild(buttonElement)
+
+let fetch = JSObject.global.fetch.function!.async
+
+func printZen() async {
+    let result = await try! fetch("https://api.github.com/zen").object!
+    let text = await try! result.asyncing.text!()
+    print(text)
+}
+
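+// Top-level code cannot `await` yet, so hand the async work to
+// `JavaScriptEventLoop.runAsync`, which forwards it to the C shim's
+// `swift_run_async` to run as a task on the JavaScript event loop.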
+JavaScriptEventLoop.runAsync {
+    await printZen()
+}
diff --git a/IntegrationTests/Makefile b/IntegrationTests/Makefile
index cdb29570..c00efb1b 100644
--- a/IntegrationTests/Makefile
+++ b/IntegrationTests/Makefile
@@ -33,3 +33,5 @@ benchmark: benchmark_setup run_benchmark
 .PHONY: test
 test: build_rt dist/PrimaryTests.wasm
 	node bin/primary-tests.js
+concurrency_test: build_rt dist/ConcurrencyTests.wasm
+	node bin/concurrency-tests.js
diff --git a/IntegrationTests/TestSuites/Package.swift b/IntegrationTests/TestSuites/Package.swift
index 4fca7845..ae74f788 100644
--- a/IntegrationTests/TestSuites/Package.swift
+++ b/IntegrationTests/TestSuites/Package.swift
@@ -1,4 +1,4 @@
-// swift-tools-version:5.2
+// swift-tools-version:5.5
 
 import PackageDescription
 
@@ -15,6 +15,12 @@ let package = Package(
     dependencies: [.package(name: "JavaScriptKit", path: "../../")],
     targets: [
         .target(name: "PrimaryTests", dependencies: ["JavaScriptKit"]),
+        .target(
+            name: "ConcurrencyTests",
+            dependencies: [
+                .product(name: "JavaScriptEventLoop", package: "JavaScriptKit"),
+            ]
+        ),
         .target(name: "BenchmarkTests", dependencies: ["JavaScriptKit"]),
     ]
 )
diff --git a/IntegrationTests/TestSuites/Sources/ConcurrencyTests/UnitTestUtils.swift b/IntegrationTests/TestSuites/Sources/ConcurrencyTests/UnitTestUtils.swift
new file mode 100644
index 00000000..1f8c502d
--- /dev/null
+++ b/IntegrationTests/TestSuites/Sources/ConcurrencyTests/UnitTestUtils.swift
@@ -0,0 +1,124 @@
+import JavaScriptKit
+
+var printTestNames = false
+// Uncomment the next line to print the name of each test suite before running it.
+// This will make it easier to debug any errors that occur on the JS side.
+//printTestNames = true
+
+func test(_ name: String, testBlock: () throws -> Void) throws {
+    if printTestNames { print(name) }
+    do {
+        try testBlock()
+    } catch {
+        print("Error in \(name)")
+        print(error)
+        throw error
+    }
+}
+
+func asyncTest(_ name: String, testBlock: () async throws -> Void) async throws -> Void {
+    if printTestNames { print(name) }
+    do {
+        await try testBlock()
+    } catch {
+        print("Error in \(name)")
+        print(error)
+        throw error
+    }
+}
+
+struct MessageError: Error {
+    let message: String
+    let file: StaticString
+    let line: UInt
+    let column: UInt
+    init(_ message: String, file: StaticString, line: UInt, column: UInt) {
+        self.message = message
+        self.file = file
+        self.line = line
+        self.column = column
+    }
+}
+
+func expectEqual<T: Equatable>(
+    _ lhs: T, _ rhs: T,
+    file: StaticString = #file, line: UInt = #line, column: UInt = #column
+) throws {
+    if lhs != rhs {
+        throw MessageError("Expect to be equal \"\(lhs)\" and \"\(rhs)\"", file: file, line: line, column: column)
+    }
+}
+
+func expectCast<T, U>(
+    _ value: T, to type: U.Type = U.self,
+    file: StaticString = #file, line: UInt = #line, column: UInt = #column
+) throws -> U {
+    guard let value = value as? U else {
+        throw MessageError("Expect \"\(value)\" to be \(U.self)", file: file, line: line, column: column)
+    }
+    return value
+}
+
+func expectObject(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> JSObject {
+    switch value {
+    case let .object(ref): return ref
+    default:
+        throw MessageError("Type of \(value) should be \"object\"", file: file, line: line, column: column)
+    }
+}
+
+func expectArray(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> JSArray {
+    guard let array = value.array else {
+        throw MessageError("Type of \(value) should be \"array\"", file: file, line: line, column: column)
+    }
+    return array
+}
+
+func expectFunction(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> JSFunction {
+    switch value {
+    case let .function(ref): return ref
+    default:
+        throw MessageError("Type of \(value) should be \"function\"", file: file, line: line, column: column)
+    }
+}
+
+func expectBoolean(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> Bool {
+    switch value {
+    case let .boolean(bool): return bool
+    default:
+        throw MessageError("Type of \(value) should be \"boolean\"", file: file, line: line, column: column)
+    }
+}
+
+func expectNumber(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> Double {
+    switch value {
+    case let .number(number): return number
+    default:
+        throw MessageError("Type of \(value) should be \"number\"", file: file, line: line, column: column)
+    }
+}
+
+func expectString(_ value: JSValue, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws -> String {
+    switch value {
+    case let .string(string): return String(string)
+    default:
+        throw MessageError("Type of \(value) should be \"string\"", file: file, line: line, column: column)
+    }
+}
+
+func expectAsyncThrow<T>(_ body: @autoclosure () async throws -> T, file: StaticString = #file, line: UInt = #line, column: UInt = #column) async throws -> Error {
+    do {
+        _ = await try body()
+    } catch {
+        return error
+    }
+    throw MessageError("Expect to throw an exception", file: file, line: line, column: column)
+}
+
+func expectNotNil<T>(_ value: T?, file: StaticString = #file, line: UInt = #line, column: UInt = #column) throws {
+    switch value {
+    case .some: return
+    case .none:
+        throw MessageError("Expect a non-nil value", file: file, line: line, column: column)
+    }
+}
diff --git a/IntegrationTests/TestSuites/Sources/ConcurrencyTests/main.swift b/IntegrationTests/TestSuites/Sources/ConcurrencyTests/main.swift
new file mode 100644
index 00000000..c1d8a330
--- /dev/null
+++ b/IntegrationTests/TestSuites/Sources/ConcurrencyTests/main.swift
@@ -0,0 +1,40 @@
+import JavaScriptEventLoop
+import JavaScriptKit
+
+JavaScriptEventLoop.install()
+
+try JavaScriptEventLoop.runAsync {
+    struct E: Error, Equatable {
+        let value: Int
+    }
+
+    await try asyncTest("Task.runDetached value") {
+        let handle = Task.runDetached { 1 }
+        await try expectEqual(handle.get(), 1)
+    }
+
+    await try asyncTest("Task.runDetached throws") {
+        let handle = Task.runDetached {
+            throw E(value: 2)
+        }
+        let error = await try expectAsyncThrow(await handle.get())
+        let e = try expectCast(error, to: E.self)
+        try expectEqual(e, E(value: 2))
+    }
+
+    await try asyncTest("await resolved Promise") {
+        let p = JSPromise(resolver: { resolve in
+            resolve(.success(1))
+        })
+        await try
expectEqual(p.await(), 1) + } + + await try asyncTest("await rejected Promise") { + let p = JSPromise(resolver: { resolve in + resolve(.failure(.number(3))) + }) + let error = await try expectAsyncThrow(await p.await()) + let jsValue = try expectCast(error, to: JSValue.self) + try expectEqual(jsValue, 3) + } +} diff --git a/IntegrationTests/bin/concurrency-tests.js b/IntegrationTests/bin/concurrency-tests.js new file mode 100644 index 00000000..3fe09fd8 --- /dev/null +++ b/IntegrationTests/bin/concurrency-tests.js @@ -0,0 +1,15 @@ +const { startWasiTask } = require("../lib"); + +global.fetch = require('node-fetch'); +global.sleep = function () { + return new Promise(resolve => { + setTimeout(() => { + resolve('resolved'); + }, 2000); + }); +} + +startWasiTask("./dist/ConcurrencyTests.wasm").catch((err) => { + console.log(err); + process.exit(1); +}); diff --git a/Package.swift b/Package.swift index 813fe7c2..40818367 100644 --- a/Package.swift +++ b/Package.swift @@ -1,4 +1,4 @@ -// swift-tools-version:5.2 +// swift-tools-version:5.3 import PackageDescription @@ -6,12 +6,36 @@ let package = Package( name: "JavaScriptKit", products: [ .library(name: "JavaScriptKit", targets: ["JavaScriptKit"]), + .library(name: "JavaScriptEventLoop", targets: ["JavaScriptEventLoop"]), ], targets: [ .target( name: "JavaScriptKit", dependencies: ["_CJavaScriptKit"] ), + .target( + name: "JavaScriptEventLoop", + dependencies: ["JavaScriptKit", "_CJavaScriptEventLoop"], + swiftSettings: [ + .unsafeFlags(["-Xfrontend", "-enable-experimental-concurrency"]), + ] + ), .target(name: "_CJavaScriptKit"), - ] + .target( + name: "_CJavaScriptEventLoop", + dependencies: ["_CJavaScriptKit"], + exclude: [ + "README", "LICENSE-llvm", "LICENSE-swift", "scripts", + "include/swift/ABI/MetadataKind.def", + "include/swift/ABI/ValueWitness.def", + "include/swift/AST/ReferenceStorage.def", + "include/swift/Demangling/DemangleNodes.def", + "include/swift/Demangling/ValueWitnessMangling.def", + ], + linkerSettings: [ + .linkedLibrary("swift_Concurrency", .when(platforms: [.wasi])), + ] + ), + ], + cxxLanguageStandard: .cxx14 ) diff --git a/Sources/JavaScriptEventLoop/JSAsyncFunction.swift b/Sources/JavaScriptEventLoop/JSAsyncFunction.swift new file mode 100644 index 00000000..0b508b64 --- /dev/null +++ b/Sources/JavaScriptEventLoop/JSAsyncFunction.swift @@ -0,0 +1,87 @@ +import JavaScriptKit + +/// A `JSFunction` wrapper that enables async-function calls. +/// Exceptions produced by JavaScript functions will be thrown as `JSValue`. +/// +/// ```swift +/// let fetch = JSObject.global.fetch.function!.async +/// let result = await try! fetch("https://api.github.com/zen") +/// ``` +public class JSAsyncFunction { + private let base: JSFunction + public init(_ base: JSFunction) { + self.base = base + } + + /// Call this function with given `arguments` and binding given `this` as context. + /// - Parameters: + /// - this: The value to be passed as the `this` parameter to this function. + /// - arguments: Arguments to be passed to this function. + /// - Returns: The result of this call. + @discardableResult + public func callAsFunction(this: JSObject? = nil, arguments: [ConvertibleToJSValue]) async throws -> JSValue { + let result = base.callAsFunction(this: this, arguments: arguments) + guard let object = result.object, let promise = JSPromise(object) else { + fatalError("'\(result)' should be Promise object") + } + return await try promise.await() + } + + /// A variadic arguments version of `callAsFunction`. 
+    @discardableResult
+    public func callAsFunction(this: JSObject? = nil, _ arguments: ConvertibleToJSValue...) async throws -> JSValue {
+        await try callAsFunction(this: this, arguments: arguments)
+    }
+}
+
+public extension JSFunction {
+    /// A modifier to call this function as an async function
+    ///
+    /// ```swift
+    /// let fetch = JSObject.global.fetch.function!.async
+    /// let result = await try! fetch("https://api.github.com/zen")
+    /// ```
+    var `async`: JSAsyncFunction {
+        JSAsyncFunction(self)
+    }
+}
+
+/// A `JSObject` wrapper that enables async method calls capturing `this`.
+/// Exceptions produced by JavaScript functions will be thrown as `JSValue`.
+@dynamicMemberLookup
+public class JSAsyncingObject {
+    private let base: JSObject
+    public init(_ base: JSObject) {
+        self.base = base
+    }
+
+    /// Returns the `name` member method binding this object as `this` context.
+    /// - Parameter name: The name of this object's member to access.
+    /// - Returns: The `name` member method binding this object as `this` context.
+    public subscript(_ name: String) -> ((ConvertibleToJSValue...) async throws -> JSValue)? {
+        guard let function = base[name].function?.async else { return nil }
+        return { [base] (arguments: ConvertibleToJSValue...) in
+            await try function(this: base, arguments: arguments)
+        }
+    }
+
+    /// A convenience method of `subscript(_ name: String) -> ((ConvertibleToJSValue...) async throws -> JSValue)?`
+    /// to access the member through Dynamic Member Lookup.
+    public subscript(dynamicMember name: String) -> ((ConvertibleToJSValue...) async throws -> JSValue)? {
+        self[name]
+    }
+}
+
+
+public extension JSObject {
+
+    /// A modifier to call methods as async methods capturing `this`
+    ///
+    /// ```swift
+    /// let response = await try! JSObject.global.fetch.function!.async("https://api.github.com/zen").object!
+    /// let text = await try! response.asyncing.text!()
+    /// ```
+    var asyncing: JSAsyncingObject {
+        JSAsyncingObject(self)
+    }
+}
diff --git a/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift b/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift
new file mode 100644
index 00000000..c8b7c839
--- /dev/null
+++ b/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift
@@ -0,0 +1,59 @@
+import _CJavaScriptEventLoop
+import JavaScriptKit
+
+public enum JavaScriptEventLoop {
+    public static func install() {
+        installTaskEnqueueHook()
+    }
+    public static func runAsync(_ asyncFun: @escaping () async throws -> ()) rethrows {
+        try _runAsync(asyncFun)
+    }
+}
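+// A minimal usage sketch, mirroring Example/JavaScriptKitExample/main.swift:
+//
+//     JavaScriptEventLoop.install()
+//     JavaScriptEventLoop.runAsync {
+//         let fetch = JSObject.global.fetch.function!.async
+//         _ = await try! fetch("https://api.github.com/zen")
+//     }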
fetch("https://api.github.com/zen") + /// ``` + var asyncing: JSAsyncingObject { + JSAsyncingObject(self) + } +} diff --git a/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift b/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift new file mode 100644 index 00000000..c8b7c839 --- /dev/null +++ b/Sources/JavaScriptEventLoop/JavaScriptEventLoop.swift @@ -0,0 +1,59 @@ +import _CJavaScriptEventLoop +import JavaScriptKit + +public enum JavaScriptEventLoop { + public static func install() { + installTaskEnqueueHook() + } + public static func runAsync(_ asyncFun: @escaping () async throws -> ()) rethrows { + try _runAsync(asyncFun) + } +} + +public extension JSPromise { + func await() async throws -> JSValue { + await try withUnsafeThrowingContinuation { [self] continuation in + self.then( + success: { + continuation.resume(returning: $0) + return JSValue.undefined + }, + failure: { + continuation.resume(throwing: $0) + return JSValue.undefined + } + ) + } + } +} + +@_silgen_name("swift_run_async") +func _runAsync(_ asyncFun: @escaping () async throws -> ()) rethrows + +private func getPromise(from context: UnsafeMutablePointer) -> JSPromise { + let promise: JSPromise + if let cached = context.pointee.Promise { + promise = Unmanaged.fromOpaque(cached).takeUnretainedValue() + } else { + promise = JSPromise(resolver: { resolver -> Void in + resolver(.success(.undefined)) + }) + let pointer = Unmanaged.passRetained(promise).retain().toOpaque() + context.pointee.Promise = pointer + } + return promise +} + +#if arch(wasm32) +@_cdecl("registerEventLoopHook") +func registerEventLoopHook( + _ callback: @convention(c) @escaping (UnsafeMutablePointer) -> Void, + _ context: UnsafeMutablePointer +) { + getPromise(from: context).then { _ in + callback(context) + return JSValue.undefined + } +} + +#endif diff --git a/Sources/_CJavaScriptEventLoop/AsyncCall.h b/Sources/_CJavaScriptEventLoop/AsyncCall.h new file mode 100644 index 00000000..b813f071 --- /dev/null +++ b/Sources/_CJavaScriptEventLoop/AsyncCall.h @@ -0,0 +1,196 @@ +//===--- AsyncCall.h - Conveniences for doing async calls ----------*- C++ -*-// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Convenience functions for implementing Swift asynchronous functions +// in C++ code. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_CONCURRENCY_ASYNCCALL_H +#define SWIFT_CONCURRENCY_ASYNCCALL_H + +#include "swift/Runtime/Concurrency.h" +#include "swift/ABI/Task.h" +#include + +namespace swift { +namespace { + +/// Template-metaprogrammed basic layout for the given sequence of types. +template +struct BasicLayout; +template +struct BasicLayout { + static constexpr size_t size = StartingOffset; +}; +template +struct BasicLayout { + // Round up to a multiple of the alignment. 
+
+#if arch(wasm32)
+@_cdecl("registerEventLoopHook")
+func registerEventLoopHook(
+    _ callback: @convention(c) @escaping (UnsafeMutablePointer<EventLoopContext>) -> Void,
+    _ context: UnsafeMutablePointer<EventLoopContext>
+) {
+    getPromise(from: context).then { _ in
+        callback(context)
+        return JSValue.undefined
+    }
+}
+
+#endif
diff --git a/Sources/_CJavaScriptEventLoop/AsyncCall.h b/Sources/_CJavaScriptEventLoop/AsyncCall.h
new file mode 100644
index 00000000..b813f071
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/AsyncCall.h
@@ -0,0 +1,196 @@
+//===--- AsyncCall.h - Conveniences for doing async calls ----------*- C++ -*-//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+//
+// Convenience functions for implementing Swift asynchronous functions
+// in C++ code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_CONCURRENCY_ASYNCCALL_H
+#define SWIFT_CONCURRENCY_ASYNCCALL_H
+
+#include "swift/Runtime/Concurrency.h"
+#include "swift/ABI/Task.h"
+#include <cstddef>
+
+namespace swift {
+namespace {
+
+/// Template-metaprogrammed basic layout for the given sequence of types.
+template <size_t StartingOffset, class... FieldTys>
+struct BasicLayout;
+template <size_t StartingOffset>
+struct BasicLayout<StartingOffset> {
+  static constexpr size_t size = StartingOffset;
+};
+template <size_t StartingOffset, class HeadTy, class... TailTys>
+struct BasicLayout<StartingOffset, HeadTy, TailTys...> {
+  // Round up to a multiple of the alignment.
+  static constexpr size_t fieldOffset =
+    (StartingOffset + alignof(HeadTy) - 1) & ~(alignof(HeadTy) - 1);
+  static constexpr size_t fieldEnd = fieldOffset + sizeof(HeadTy);
+  using TailLayout = BasicLayout<fieldEnd, TailTys...>;
+  static constexpr size_t size = TailLayout::size;
+};
+
+template <class Layout, size_t FieldIndex>
+struct BasicLayoutOffset {
+  static constexpr size_t value =
+    BasicLayoutOffset<typename Layout::TailLayout, FieldIndex - 1>::value;
+};
+template <class Layout>
+struct BasicLayoutOffset<Layout, 0> {
+  static constexpr size_t value = Layout::fieldOffset;
+};
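+// A rough worked example (wasm32: 4-byte pointers, 8-byte-aligned int64_t):
+// BasicLayout<0, SwiftError*, int64_t> puts the SwiftError* slot at offset 0,
+// rounds up to 8 for the int64_t, and reports size 16; correspondingly,
+// BasicLayoutOffset over that layout with index 1 yields offset 8.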
+
+/// Template-metaprogrammed layout for an async frame with the given
+/// signature. Works as long as you don't have a mix of indirect and
+/// direct results; indirect results should be coded as initial
+/// indirect arguments in the function signature.
+///
+/// Note that there's always a slot for an error result.
+template <class Signature>
+struct AsyncFrameLayout;
+
+template <class... ArgTys, bool Throws>
+struct AsyncFrameLayout<AsyncSignature<void(ArgTys...), Throws>> {
+  using BasicLayout = BasicLayout<0, SwiftError*, ArgTys...>;
+  static constexpr size_t firstArgIndex = 1;
+};
+template <class ResultTy, class... ArgTys, bool Throws>
+struct AsyncFrameLayout<AsyncSignature<ResultTy(ArgTys...), Throws>> {
+  using BasicLayout = BasicLayout<0, SwiftError*, ResultTy, ArgTys...>;
+  static constexpr size_t firstArgIndex = 2;
+};
+
+/// A helper class which, when used as a base class under common
+/// C++ ABIs, adds no extra size to a struct when the template
+/// argument is 0.
+template <size_t Size>
+class AsyncFrameStorageHelper: public AsyncContext {
+  // This needs to be aligned at least this much or else Itanium
+  // will try to put it in the tail-padding of the AsyncContext.
+  alignas(void*)
+  char buffer[Size];
+public:
+  using AsyncContext::AsyncContext;
+  char *data() { return buffer; }
+};
+template <>
+class AsyncFrameStorageHelper<0>: public AsyncContext {
+public:
+  using AsyncContext::AsyncContext;
+  char *data() { return reinterpret_cast<char*>(this); }
+};
+
+template <class Signature,
+          class FrameLayout = AsyncFrameLayout<Signature>>
+struct AsyncFrameStorage;
+template <class ResultTy, class... ArgTys, bool Throws, class FrameLayout>
+struct AsyncFrameStorage<AsyncSignature<ResultTy(ArgTys...), Throws>,
+                         FrameLayout>
+    : AsyncFrameStorageHelper<FrameLayout::BasicLayout::size> {
+
+  AsyncFrameStorage(AsyncContextFlags flags,
+                    TaskContinuationFunction *resumeFunction,
+                    ExecutorRef resumeToExecutor,
+                    AsyncContext *resumeToContext,
+                    ArgTys... args)
+    : AsyncFrameStorageHelper<FrameLayout::BasicLayout::size>(
+        flags, resumeFunction, resumeToExecutor, resumeToContext) {
+    initializeHelper<FrameLayout::firstArgIndex>(this->data(), args...);
+  }
+
+private:
+  template <size_t NextIndex>
+  void initializeHelper(char *buffer) {}
+
+  template <size_t NextIndex, class NextArgTy, class... TailArgTys>
+  void initializeHelper(char *buffer, NextArgTy nextArg,
+                        TailArgTys... tailArgs) {
+    auto offset = BasicLayoutOffset<typename FrameLayout::BasicLayout,
+                                    NextIndex>::value;
+    new (buffer + offset) NextArgTy(nextArg);
+    initializeHelper<NextIndex + 1>(buffer, tailArgs...);
+  }
+};
+
+
+/// The context header for calling a function that takes the
+/// given arguments.
+template <class CalleeSignature, class CallerContextType>
+struct AsyncCalleeContext : AsyncFrameStorage<CalleeSignature> {
+  using CallerContext = CallerContextType;
+
+  template <class... Args>
+  AsyncCalleeContext(TaskContinuationFunction *resumeFunction,
+                     ExecutorRef resumeToExecutor,
+                     CallerContext *resumeToContext,
+                     Args... args)
+    : AsyncFrameStorage<CalleeSignature>(AsyncContextKind::Ordinary,
+                                         resumeFunction, resumeToExecutor,
+                                         resumeToContext, args...) {}
+
+  CallerContext *getParent() const {
+    return static_cast<CallerContext *>(this->Parent);
+  }
+};
+
+/// Push a context to call a function.
+template <class CalleeSignature, class CallerContext, class... Args>
+static AsyncCalleeContext<CalleeSignature, CallerContext> *
+pushAsyncContext(AsyncTask *task, ExecutorRef executor,
+                 CallerContext *callerContext, size_t calleeContextSize,
+                 TaskContinuationFunction *resumeFunction,
+                 Args... args) {
+  using CalleeContext =
+    AsyncCalleeContext<CalleeSignature, CallerContext>;
+  assert(calleeContextSize >= sizeof(CalleeContext));
+
+  void *rawCalleeContext = swift_task_alloc(task, calleeContextSize);
+  return new (rawCalleeContext) CalleeContext(resumeFunction, executor,
+                                              callerContext, args...);
+}
+
+/// Make an asynchronous call.
+template <class CalleeSignature, class CallerContext, class... Args>
+SWIFT_CC(swiftasync)
+static void callAsync(AsyncTask *task,
+                      ExecutorRef executor,
+                      CallerContext *callerContext,
+                      TaskContinuationFunction *resumeFunction,
+                      const typename CalleeSignature::FunctionPointer *function,
+                      Args... args) {
+  auto calleeContextSize = function->ExpectedContextSize;
+  auto calleeContext = pushAsyncContext<CalleeSignature>(
+      task, executor, callerContext, calleeContextSize, resumeFunction,
+      args...);
+  return function->Function(task, executor, calleeContext);
+}
+
+/// Given that we've just entered the caller's continuation function
+/// upon return from a function previously called with callAsync, pop the
+/// callee's context and return the caller's context.
+template <class CalleeContext>
+static typename CalleeContext::CallerContext *
+popAsyncContext(AsyncTask *task, CalleeContext *calleeContext) {
+  auto callerContext = calleeContext->getParent();
+  swift_task_dealloc(task, calleeContext);
+  return callerContext;
+}
+
+} // end anonymous namespace
+} // end namespace swift
+
+#endif
diff --git a/Sources/_CJavaScriptEventLoop/Executor.cpp b/Sources/_CJavaScriptEventLoop/Executor.cpp
new file mode 100644
index 00000000..a6100e93
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/Executor.cpp
@@ -0,0 +1,90 @@
+#include "Queue.h"
+#include "AsyncCall.h"
+#include <cstdint>
+#include <cstddef>
+#include <cstdlib>
+
+using namespace swift;
+
+static Queue GlobalQueue;
+
+SWIFT_CC(swift)
+static void enqueueGlobal(Job *job) {
+  GlobalQueue.insertJob(job);
+}
+
+extern "C" void installTaskEnqueueHook(void) {
+  swift_task_enqueueGlobal_hook = enqueueGlobal;
+}
+
+struct ThickAsyncFunctionContext: HeapObject {
+  uint32_t ExpectedContextSize;
+};
+
+
+
+using RunAndBlockSignature =
+  AsyncSignature<void(HeapObject *), /*throws*/ false>;
+struct RunAndBlockContext: AsyncContext {
+  const void *Function;
+  HeapObject *FunctionContext;
+};
+using RunAndBlockCalleeContext =
+  AsyncCalleeContext<RunAndBlockSignature, RunAndBlockContext>;
+
+/// Second half of the runAndBlock async function.
+SWIFT_CC(swiftasync)
+static void runAndBlock_finish(AsyncTask *task, ExecutorRef executor,
+                               AsyncContext *_context) {
+  auto calleeContext = static_cast<RunAndBlockCalleeContext *>(_context);
+  auto context = popAsyncContext(task, calleeContext);
+  return context->ResumeParent(task, executor, context);
+}
+
+/// First half of the runAndBlock async function.
+SWIFT_CC(swiftasync)
+static void runAndBlock_start(AsyncTask *task, ExecutorRef executor,
+                              AsyncContext *_context) {
+  auto callerContext = static_cast<RunAndBlockContext *>(_context);
+
+  size_t calleeContextSize;
+  RunAndBlockSignature::FunctionType *function;
+
+  // If the function context is non-null, then the function pointer is
+  // an ordinary function pointer.
+  auto functionContext = callerContext->FunctionContext;
+  if (functionContext) {
+    function = reinterpret_cast<RunAndBlockSignature::FunctionType *>(
+      const_cast<void *>(callerContext->Function));
+    calleeContextSize =
+      static_cast<ThickAsyncFunctionContext *>(functionContext)
+        ->ExpectedContextSize;
+
+  // Otherwise, the function pointer is an async function pointer.
+  } else {
+    auto fnPtr = reinterpret_cast<const RunAndBlockSignature::FunctionPointer *>(
+      callerContext->Function);
+    function = fnPtr->Function;
+    calleeContextSize = fnPtr->ExpectedContextSize;
+  }
+
+  auto calleeContext =
+    pushAsyncContext<RunAndBlockSignature>(task, executor, callerContext,
+                                           calleeContextSize,
+                                           &runAndBlock_finish,
+                                           functionContext);
+  return function(task, executor, calleeContext);
+}
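+// `swift_run_async` is the C entry point behind JavaScriptEventLoop.runAsync
+// (imported into Swift via @_silgen_name). It creates a new top-level task
+// whose first resumption is runAndBlock_start, stashes the closure's function
+// pointer and context in the initial async context, and enqueues the task on
+// the global executor, which the installed hook redirects to the JS queue.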
+SWIFT_CC(swift)
+extern "C" void swift_run_async(const void *function, HeapObject *functionContext) {
+  auto pair = swift_task_create_f(JobFlags(JobKind::Task,
+                                           JobPriority::Default),
+                                  /*parent*/ nullptr,
+                                  &runAndBlock_start,
+                                  sizeof(RunAndBlockContext));
+  auto context = static_cast<RunAndBlockContext *>(pair.InitialContext);
+  context->Function = function;
+  context->FunctionContext = functionContext;
+  swift_task_enqueueGlobal(pair.Task);
+}
diff --git a/Sources/_CJavaScriptEventLoop/LICENSE-llvm b/Sources/_CJavaScriptEventLoop/LICENSE-llvm
new file mode 100644
index 00000000..57151765
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/LICENSE-llvm
@@ -0,0 +1,278 @@
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. 
+ +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +============================================================================== +Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy): +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign. +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. diff --git a/Sources/_CJavaScriptEventLoop/LICENSE-swift b/Sources/_CJavaScriptEventLoop/LICENSE-swift new file mode 100644 index 00000000..61b0c781 --- /dev/null +++ b/Sources/_CJavaScriptEventLoop/LICENSE-swift @@ -0,0 +1,211 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+
+
+## Runtime Library Exception to the Apache 2.0 License: ##
+
+
+    As an exception, if you use this Software to compile your source code and
+    portions of this Software are embedded into the binary product as a result,
+    you may redistribute such product without providing attribution as would
+    otherwise be required by Sections 4(a), 4(b) and 4(d) of the License.
diff --git a/Sources/_CJavaScriptEventLoop/Queue.cpp b/Sources/_CJavaScriptEventLoop/Queue.cpp
new file mode 100644
index 00000000..7713c96a
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/Queue.cpp
@@ -0,0 +1,57 @@
+#include "Queue.h"
+#include "_CJavaScriptEventLoop.h"
+
+using namespace swift;
+
+/// Get the next-in-queue storage slot.
+static Job *&nextInQueue(Job *cur) {
+  return reinterpret_cast<Job *&>(cur->SchedulerPrivate);
+}
+
+Job *Queue::claimNext() {
+  if (auto job = this->HeadJob) {
+    this->HeadJob = nextInQueue(job);
+    return job;
+  }
+  return nullptr;
+}
+
+Queue::Queue() : HeadJob(nullptr), isSpinning(false) {
+  this->Context = (EventLoopContext) {
+    .Queue = this,
+    .Promise = nullptr,
+  };
+}
+
+void runEnqueuedJobs(EventLoopContext *context) {
+  Queue *queue = (Queue *)(context->Queue);
+  assert(queue->isSpinning);
+
+  while (auto *job = queue->claimNext()) {
+    job->run(ExecutorRef::generic());
+  }
+
+  queue->isSpinning = false;
+}
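+
+// insertJob keeps the list sorted by priority (highest first). The isSpinning
+// flag means a runEnqueuedJobs callback is already pending; when the queue
+// goes from empty to non-empty, registerEventLoopHook (implemented on the
+// Swift side) arranges for runEnqueuedJobs to run as a JavaScript microtask.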
+void Queue::insertJob(swift::Job *newJob) {
+  Job **position = &HeadJob;
+  while (auto cur = *position) {
+    // If we find a job with lower priority, insert here.
+    if (cur->getPriority() < newJob->getPriority()) {
+      nextInQueue(newJob) = cur;
+      *position = newJob;
+      return;
+    }
+
+    // Otherwise, keep advancing through the queue.
+    position = &nextInQueue(cur);
+  }
+  nextInQueue(newJob) = nullptr;
+  *position = newJob;
+
+  if (!isSpinning) {
+    isSpinning = true;
+    registerEventLoopHook(runEnqueuedJobs, &this->Context);
+  }
+}
diff --git a/Sources/_CJavaScriptEventLoop/Queue.h b/Sources/_CJavaScriptEventLoop/Queue.h
new file mode 100644
index 00000000..1f7d4766
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/Queue.h
@@ -0,0 +1,22 @@
+#ifndef _CJavaScriptEventLoop_Queue_h
+#define _CJavaScriptEventLoop_Queue_h
+
+#include <swift/ABI/Task.h>
+#include <cassert>
+#include <__nullptr> // FIXME
+#include <cstddef>
+#include "_CJavaScriptEventLoop.h"
+
+class Queue {
+public:
+  swift::Job *HeadJob;
+  bool isSpinning;
+  EventLoopContext Context;
+
+  Queue();
+  void insertJob(swift::Job *newJob);
+  swift::Job *claimNext();
+};
+
+
+#endif /* _CJavaScriptEventLoop_Queue_h */
diff --git a/Sources/_CJavaScriptEventLoop/README b/Sources/_CJavaScriptEventLoop/README
new file mode 100644
index 00000000..98275a7c
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/README
@@ -0,0 +1,9 @@
+# Concurrency support with JavaScript EventLoop
+
+- include/llvm is copied from the LLVM project
+- include/swift and stdlib directories are copied from the Swift project
+
+
+## Update Swift headers
+
+Please update `.swift-version` and run `./scripts/copy-headers`
diff --git a/Sources/_CJavaScriptEventLoop/XcodeSupport.cpp b/Sources/_CJavaScriptEventLoop/XcodeSupport.cpp
new file mode 100644
index 00000000..7ae4951c
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/XcodeSupport.cpp
@@ -0,0 +1,33 @@
+#include "stdlib/public/SwiftShims/Visibility.h"
+#include <swift/Runtime/Concurrency.h>
+#include <swift/ABI/Task.h>
+#include "_CJavaScriptEventLoop.h"
+
+using namespace swift;
+
+#ifndef __wasm32__
+
+SWIFT_CC(swift)
+void (*swift::swift_task_enqueueGlobal_hook)(Job *job) = nullptr;
+
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+void *swift_task_alloc(AsyncTask *task, size_t size) {}
+
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+AsyncTaskAndContext swift_task_create_f(JobFlags flags,
+                                        AsyncTask *parent,
+                                        ThinNullaryAsyncSignature::FunctionType *function,
+                                        size_t initialContextSize) {}
+
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+void swift_task_dealloc(AsyncTask *task, void *ptr) {}
+
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+void swift_task_enqueueGlobal(Job *job) {}
+
+
+void registerEventLoopHook(void callback(EventLoopContext *context),
+                           EventLoopContext *context) {
+  // dummy implementation
+}
+#endif
diff --git a/Sources/_CJavaScriptEventLoop/include/_CJavaScriptEventLoop.h b/Sources/_CJavaScriptEventLoop/include/_CJavaScriptEventLoop.h
new file mode 100644
index 00000000..f258924b
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/_CJavaScriptEventLoop.h
@@ -0,0 +1,20 @@
+#ifndef _CJavaScriptEventLoop_h
+#define _CJavaScriptEventLoop_h
+
+
+typedef struct {
+  void *Queue;
+  void *Promise;
+} EventLoopContext;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void installTaskEnqueueHook(void);
+void registerEventLoopHook(void callback(EventLoopContext *context),
+                           EventLoopContext *context);
+#ifdef __cplusplus
+};
+#endif
+
+#endif // _CJavaScriptEventLoop_h
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/ArrayRef.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 00000000..f5af0193
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,560 @@
+//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+  /// ArrayRef - Represent a constant reference to an array (0 or more elements
+  /// consecutively in memory), i.e. a start pointer and a length. It allows
+  /// various APIs to take consecutive elements easily and conveniently.
+  ///
+  /// This class does not own the underlying data, it is expected to be used in
+  /// situations where the data resides in some other buffer, whose lifetime
+  /// extends past that of the ArrayRef. For this reason, it is not in general
+  /// safe to store an ArrayRef.
+  ///
+  /// This is intended to be trivially copyable, so it should be passed by
+  /// value.
+  template <typename T>
+  class LLVM_GSL_POINTER LLVM_NODISCARD ArrayRef {
+  public:
+    using iterator = const T *;
+    using const_iterator = const T *;
+    using size_type = size_t;
+    using reverse_iterator = std::reverse_iterator<iterator>;
+
+  private:
+    /// The start of the array, in an external buffer.
+    const T *Data = nullptr;
+
+    /// The number of elements.
+    size_type Length = 0;
+
+  public:
+    /// @name Constructors
+    /// @{
+
+    /// Construct an empty ArrayRef.
+    /*implicit*/ ArrayRef() = default;
+
+    /// Construct an empty ArrayRef from None.
+    /*implicit*/ ArrayRef(NoneType) {}
+
+    /// Construct an ArrayRef from a single element.
+ /*implicit*/ ArrayRef(const T &OneElt) + : Data(&OneElt), Length(1) {} + + /// Construct an ArrayRef from a pointer and length. + /*implicit*/ ArrayRef(const T *data, size_t length) + : Data(data), Length(length) {} + + /// Construct an ArrayRef from a range. + ArrayRef(const T *begin, const T *end) + : Data(begin), Length(end - begin) {} + + /// Construct an ArrayRef from a SmallVector. This is templated in order to + /// avoid instantiating SmallVectorTemplateCommon whenever we + /// copy-construct an ArrayRef. + template + /*implicit*/ ArrayRef(const SmallVectorTemplateCommon &Vec) + : Data(Vec.data()), Length(Vec.size()) { + } + + /// Construct an ArrayRef from a std::vector. + template + /*implicit*/ ArrayRef(const std::vector &Vec) + : Data(Vec.data()), Length(Vec.size()) {} + + /// Construct an ArrayRef from a std::array + template + /*implicit*/ constexpr ArrayRef(const std::array &Arr) + : Data(Arr.data()), Length(N) {} + + /// Construct an ArrayRef from a C array. + template + /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {} + + /// Construct an ArrayRef from a std::initializer_list. +#if LLVM_GNUC_PREREQ(9, 0, 0) +// Disable gcc's warning in this constructor as it generates an enormous amount +// of messages. Anyone using ArrayRef should already be aware of the fact that +// it does not do lifetime extension. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winit-list-lifetime" +#endif + /*implicit*/ ArrayRef(const std::initializer_list &Vec) + : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()), + Length(Vec.size()) {} +#if LLVM_GNUC_PREREQ(9, 0, 0) +#pragma GCC diagnostic pop +#endif + + /// Construct an ArrayRef from ArrayRef. This uses SFINAE to + /// ensure that only ArrayRefs of pointers can be converted. + template + ArrayRef(const ArrayRef &A, + std::enable_if_t::value> + * = nullptr) + : Data(A.data()), Length(A.size()) {} + + /// Construct an ArrayRef from a SmallVector. This is + /// templated in order to avoid instantiating SmallVectorTemplateCommon + /// whenever we copy-construct an ArrayRef. + template + /*implicit*/ ArrayRef( + const SmallVectorTemplateCommon &Vec, + std::enable_if_t::value> * = + nullptr) + : Data(Vec.data()), Length(Vec.size()) {} + + /// Construct an ArrayRef from std::vector. This uses SFINAE + /// to ensure that only vectors of pointers can be converted. + template + ArrayRef(const std::vector &Vec, + std::enable_if_t::value> + * = 0) + : Data(Vec.data()), Length(Vec.size()) {} + + /// @} + /// @name Simple Operations + /// @{ + + iterator begin() const { return Data; } + iterator end() const { return Data + Length; } + + reverse_iterator rbegin() const { return reverse_iterator(end()); } + reverse_iterator rend() const { return reverse_iterator(begin()); } + + /// empty - Check if the array is empty. + bool empty() const { return Length == 0; } + + const T *data() const { return Data; } + + /// size - Get the array size. + size_t size() const { return Length; } + + /// front - Get the first element. + const T &front() const { + assert(!empty()); + return Data[0]; + } + + /// back - Get the last element. + const T &back() const { + assert(!empty()); + return Data[Length-1]; + } + + // copy - Allocate copy in Allocator and return ArrayRef to it. + template ArrayRef copy(Allocator &A) { + T *Buff = A.template Allocate(Length); + std::uninitialized_copy(begin(), end(), Buff); + return ArrayRef(Buff, Length); + } + + /// equals - Check for element-wise equality. 
+ bool equals(ArrayRef RHS) const { + if (Length != RHS.Length) + return false; + return std::equal(begin(), end(), RHS.begin()); + } + + /// slice(n, m) - Chop off the first N elements of the array, and keep M + /// elements in the array. + ArrayRef slice(size_t N, size_t M) const { + assert(N+M <= size() && "Invalid specifier"); + return ArrayRef(data()+N, M); + } + + /// slice(n) - Chop off the first N elements of the array. + ArrayRef slice(size_t N) const { return slice(N, size() - N); } + + /// Drop the first \p N elements of the array. + ArrayRef drop_front(size_t N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return slice(N, size() - N); + } + + /// Drop the last \p N elements of the array. + ArrayRef drop_back(size_t N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return slice(0, size() - N); + } + + /// Return a copy of *this with the first N elements satisfying the + /// given predicate removed. + template ArrayRef drop_while(PredicateT Pred) const { + return ArrayRef(find_if_not(*this, Pred), end()); + } + + /// Return a copy of *this with the first N elements not satisfying + /// the given predicate removed. + template ArrayRef drop_until(PredicateT Pred) const { + return ArrayRef(find_if(*this, Pred), end()); + } + + /// Return a copy of *this with only the first \p N elements. + ArrayRef take_front(size_t N = 1) const { + if (N >= size()) + return *this; + return drop_back(size() - N); + } + + /// Return a copy of *this with only the last \p N elements. + ArrayRef take_back(size_t N = 1) const { + if (N >= size()) + return *this; + return drop_front(size() - N); + } + + /// Return the first N elements of this Array that satisfy the given + /// predicate. + template ArrayRef take_while(PredicateT Pred) const { + return ArrayRef(begin(), find_if_not(*this, Pred)); + } + + /// Return the first N elements of this Array that don't satisfy the + /// given predicate. + template ArrayRef take_until(PredicateT Pred) const { + return ArrayRef(begin(), find_if(*this, Pred)); + } + + /// @} + /// @name Operator Overloads + /// @{ + const T &operator[](size_t Index) const { + assert(Index < Length && "Invalid index!"); + return Data[Index]; + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t::value, ArrayRef> & + operator=(U &&Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + std::enable_if_t::value, ArrayRef> & + operator=(std::initializer_list) = delete; + + /// @} + /// @name Expensive Operations + /// @{ + std::vector vec() const { + return std::vector(Data, Data+Length); + } + + /// @} + /// @name Conversion operators + /// @{ + operator std::vector() const { + return std::vector(Data, Data+Length); + } + + /// @} + }; + + /// MutableArrayRef - Represent a mutable reference to an array (0 or more + /// elements consecutively in memory), i.e. a start pointer and a length. It + /// allows various APIs to take and modify consecutive elements easily and + /// conveniently. + /// + /// This class does not own the underlying data, it is expected to be used in + /// situations where the data resides in some other buffer, whose lifetime + /// extends past that of the MutableArrayRef. 
For this reason, it is not in + /// general safe to store a MutableArrayRef. + /// + /// This is intended to be trivially copyable, so it should be passed by + /// value. + template + class LLVM_NODISCARD MutableArrayRef : public ArrayRef { + public: + using iterator = T *; + using reverse_iterator = std::reverse_iterator; + + /// Construct an empty MutableArrayRef. + /*implicit*/ MutableArrayRef() = default; + + /// Construct an empty MutableArrayRef from None. + /*implicit*/ MutableArrayRef(NoneType) : ArrayRef() {} + + /// Construct a MutableArrayRef from a single element. + /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef(OneElt) {} + + /// Construct a MutableArrayRef from a pointer and length. + /*implicit*/ MutableArrayRef(T *data, size_t length) + : ArrayRef(data, length) {} + + /// Construct a MutableArrayRef from a range. + MutableArrayRef(T *begin, T *end) : ArrayRef(begin, end) {} + + /// Construct a MutableArrayRef from a SmallVector. + /*implicit*/ MutableArrayRef(SmallVectorImpl &Vec) + : ArrayRef(Vec) {} + + /// Construct a MutableArrayRef from a std::vector. + /*implicit*/ MutableArrayRef(std::vector &Vec) + : ArrayRef(Vec) {} + + /// Construct a MutableArrayRef from a std::array + template + /*implicit*/ constexpr MutableArrayRef(std::array &Arr) + : ArrayRef(Arr) {} + + /// Construct a MutableArrayRef from a C array. + template + /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef(Arr) {} + + T *data() const { return const_cast(ArrayRef::data()); } + + iterator begin() const { return data(); } + iterator end() const { return data() + this->size(); } + + reverse_iterator rbegin() const { return reverse_iterator(end()); } + reverse_iterator rend() const { return reverse_iterator(begin()); } + + /// front - Get the first element. + T &front() const { + assert(!this->empty()); + return data()[0]; + } + + /// back - Get the last element. + T &back() const { + assert(!this->empty()); + return data()[this->size()-1]; + } + + /// slice(n, m) - Chop off the first N elements of the array, and keep M + /// elements in the array. + MutableArrayRef slice(size_t N, size_t M) const { + assert(N + M <= this->size() && "Invalid specifier"); + return MutableArrayRef(this->data() + N, M); + } + + /// slice(n) - Chop off the first N elements of the array. + MutableArrayRef slice(size_t N) const { + return slice(N, this->size() - N); + } + + /// Drop the first \p N elements of the array. + MutableArrayRef drop_front(size_t N = 1) const { + assert(this->size() >= N && "Dropping more elements than exist"); + return slice(N, this->size() - N); + } + + MutableArrayRef drop_back(size_t N = 1) const { + assert(this->size() >= N && "Dropping more elements than exist"); + return slice(0, this->size() - N); + } + + /// Return a copy of *this with the first N elements satisfying the + /// given predicate removed. + template + MutableArrayRef drop_while(PredicateT Pred) const { + return MutableArrayRef(find_if_not(*this, Pred), end()); + } + + /// Return a copy of *this with the first N elements not satisfying + /// the given predicate removed. + template + MutableArrayRef drop_until(PredicateT Pred) const { + return MutableArrayRef(find_if(*this, Pred), end()); + } + + /// Return a copy of *this with only the first \p N elements. + MutableArrayRef take_front(size_t N = 1) const { + if (N >= this->size()) + return *this; + return drop_back(this->size() - N); + } + + /// Return a copy of *this with only the last \p N elements. 
+ MutableArrayRef take_back(size_t N = 1) const { + if (N >= this->size()) + return *this; + return drop_front(this->size() - N); + } + + /// Return the first N elements of this Array that satisfy the given + /// predicate. + template + MutableArrayRef take_while(PredicateT Pred) const { + return MutableArrayRef(begin(), find_if_not(*this, Pred)); + } + + /// Return the first N elements of this Array that don't satisfy the + /// given predicate. + template + MutableArrayRef take_until(PredicateT Pred) const { + return MutableArrayRef(begin(), find_if(*this, Pred)); + } + + /// @} + /// @name Operator Overloads + /// @{ + T &operator[](size_t Index) const { + assert(Index < this->size() && "Invalid index!"); + return data()[Index]; + } + }; + + /// This is a MutableArrayRef that owns its array. + template class OwningArrayRef : public MutableArrayRef { + public: + OwningArrayRef() = default; + OwningArrayRef(size_t Size) : MutableArrayRef(new T[Size], Size) {} + + OwningArrayRef(ArrayRef Data) + : MutableArrayRef(new T[Data.size()], Data.size()) { + std::copy(Data.begin(), Data.end(), this->begin()); + } + + OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); } + + OwningArrayRef &operator=(OwningArrayRef &&Other) { + delete[] this->data(); + this->MutableArrayRef::operator=(Other); + Other.MutableArrayRef::operator=(MutableArrayRef()); + return *this; + } + + ~OwningArrayRef() { delete[] this->data(); } + }; + + /// @name ArrayRef Convenience constructors + /// @{ + + /// Construct an ArrayRef from a single element. + template + ArrayRef makeArrayRef(const T &OneElt) { + return OneElt; + } + + /// Construct an ArrayRef from a pointer and length. + template + ArrayRef makeArrayRef(const T *data, size_t length) { + return ArrayRef(data, length); + } + + /// Construct an ArrayRef from a range. + template + ArrayRef makeArrayRef(const T *begin, const T *end) { + return ArrayRef(begin, end); + } + + /// Construct an ArrayRef from a SmallVector. + template + ArrayRef makeArrayRef(const SmallVectorImpl &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a SmallVector. + template + ArrayRef makeArrayRef(const SmallVector &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a std::vector. + template + ArrayRef makeArrayRef(const std::vector &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a std::array. + template + ArrayRef makeArrayRef(const std::array &Arr) { + return Arr; + } + + /// Construct an ArrayRef from an ArrayRef (no-op) (const) + template ArrayRef makeArrayRef(const ArrayRef &Vec) { + return Vec; + } + + /// Construct an ArrayRef from an ArrayRef (no-op) + template ArrayRef &makeArrayRef(ArrayRef &Vec) { + return Vec; + } + + /// Construct an ArrayRef from a C array. + template + ArrayRef makeArrayRef(const T (&Arr)[N]) { + return ArrayRef(Arr); + } + + /// Construct a MutableArrayRef from a single element. + template + MutableArrayRef makeMutableArrayRef(T &OneElt) { + return OneElt; + } + + /// Construct a MutableArrayRef from a pointer and length. 
+ template + MutableArrayRef makeMutableArrayRef(T *data, size_t length) { + return MutableArrayRef(data, length); + } + + /// @} + /// @name ArrayRef Comparison Operators + /// @{ + + template + inline bool operator==(ArrayRef LHS, ArrayRef RHS) { + return LHS.equals(RHS); + } + + template + inline bool operator==(SmallVectorImpl &LHS, ArrayRef RHS) { + return ArrayRef(LHS).equals(RHS); + } + + template + inline bool operator!=(ArrayRef LHS, ArrayRef RHS) { + return !(LHS == RHS); + } + + template + inline bool operator!=(SmallVectorImpl &LHS, ArrayRef RHS) { + return !(LHS == RHS); + } + + /// @} + + template hash_code hash_value(ArrayRef S) { + return hash_combine_range(S.begin(), S.end()); + } + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_ARRAYREF_H diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMap.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMap.h new file mode 100644 index 00000000..d09a747b --- /dev/null +++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMap.h @@ -0,0 +1,1221 @@ +//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseMap class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSEMAP_H +#define LLVM_ADT_DENSEMAP_H + +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/Support/AlignOf.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/MemAlloc.h" +#include "llvm/Support/type_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +namespace detail { + +// We extend a pair to allow users to override the bucket type with their own +// implementation without requiring two members. +template +struct DenseMapPair : public std::pair { + using std::pair::pair; + + KeyT &getFirst() { return std::pair::first; } + const KeyT &getFirst() const { return std::pair::first; } + ValueT &getSecond() { return std::pair::second; } + const ValueT &getSecond() const { return std::pair::second; } +}; + +} // end namespace detail + +template , + typename Bucket = llvm::detail::DenseMapPair, + bool IsConst = false> +class DenseMapIterator; + +template +class DenseMapBase { + template + using const_arg_type_t = typename const_pointer_or_const_ref::type; + +public: + using size_type = unsigned; + using key_type = KeyT; + using mapped_type = ValueT; + using value_type = BucketT; + + using iterator = DenseMapIterator; + using const_iterator = + DenseMapIterator; + + inline iterator begin() { + // When the map is empty, avoid the overhead of advancing/retreating past + // empty buckets. 
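+    // (Otherwise DenseMapIterator::AdvancePastEmptyBuckets would scan every
+    // bucket looking for a live entry that does not exist.)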
+ if (empty()) + return end(); + return makeIterator(getBuckets(), getBucketsEnd()); + } + inline iterator end() { + return makeIterator(getBucketsEnd(), getBucketsEnd(), true); + } + inline const_iterator begin() const { + if (empty()) + return end(); + return makeConstIterator(getBuckets(), getBucketsEnd()); + } + inline const_iterator end() const { + return makeConstIterator(getBucketsEnd(), getBucketsEnd(), true); + } + + LLVM_NODISCARD bool empty() const { + return getNumEntries() == 0; + } + unsigned size() const { return getNumEntries(); } + + /// Grow the densemap so that it can contain at least \p NumEntries items + /// before resizing again. + void reserve(size_type NumEntries) { + auto NumBuckets = getMinBucketToReserveForEntries(NumEntries); + if (NumBuckets > getNumBuckets()) + grow(NumBuckets); + } + + void clear() { + if (getNumEntries() == 0 && getNumTombstones() == 0) return; + + // If the capacity of the array is huge, and the # elements used is small, + // shrink the array. + if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { + shrink_and_clear(); + return; + } + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + if (std::is_trivially_destructible::value) { + // Use a simpler loop when values don't need destruction. + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) + P->getFirst() = EmptyKey; + } else { + unsigned NumEntries = getNumEntries(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { + if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + P->getSecond().~ValueT(); + --NumEntries; + } + P->getFirst() = EmptyKey; + } + } + assert(NumEntries == 0 && "Node count imbalance!"); + } + setNumEntries(0); + setNumTombstones(0); + } + + /// Return 1 if the specified key is in the map, 0 otherwise. + size_type count(const_arg_type_t Val) const { + const BucketT *TheBucket; + return LookupBucketFor(Val, TheBucket) ? 1 : 0; + } + + iterator find(const_arg_type_t Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + const_iterator find(const_arg_type_t Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeConstIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + iterator find_as(const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + template + const_iterator find_as(const LookupKeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return makeConstIterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueT lookup(const_arg_type_t Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return TheBucket->getSecond(); + return ValueT(); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. 
+ std::pair insert(const std::pair &KV) { + return try_emplace(KV.first, KV.second); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + std::pair insert(std::pair &&KV) { + return try_emplace(std::move(KV.first), std::move(KV.second)); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + std::pair try_emplace(KeyT &&Key, Ts &&... Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = + InsertIntoBucket(TheBucket, std::move(Key), std::forward(Args)...); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), true); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // The value is constructed in-place if the key is not in the map, otherwise + // it is not moved. + template + std::pair try_emplace(const KeyT &Key, Ts &&... Args) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(TheBucket, Key, std::forward(Args)...); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), true), true); + } + + /// Alternate version of insert() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + std::pair insert_as(std::pair &&KV, + const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), *this, true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first), + std::move(KV.second), Val); + return std::make_pair(makeIterator(TheBucket, getBucketsEnd(), *this, true), + true); + } + + /// insert - Range insertion of pairs. + template + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } + + bool erase(const KeyT &Val) { + BucketT *TheBucket; + if (!LookupBucketFor(Val, TheBucket)) + return false; // not in map. 
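+    // Erase via tombstone: destroy the value and overwrite the key with the
+    // tombstone sentinel so that probe chains passing through this bucket
+    // stay intact; the slot is only reclaimed by a later grow() or clear().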
+ + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + return true; + } + void erase(iterator I) { + BucketT *TheBucket = &*I; + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + } + + value_type& FindAndConstruct(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, Key); + } + + ValueT &operator[](const KeyT &Key) { + return FindAndConstruct(Key).second; + } + + value_type& FindAndConstruct(KeyT &&Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(TheBucket, std::move(Key)); + } + + ValueT &operator[](KeyT &&Key) { + return FindAndConstruct(std::move(Key)).second; + } + + /// isPointerIntoBucketsArray - Return true if the specified pointer points + /// somewhere into the DenseMap's array of buckets (i.e. either to a key or + /// value in the DenseMap). + bool isPointerIntoBucketsArray(const void *Ptr) const { + return Ptr >= getBuckets() && Ptr < getBucketsEnd(); + } + + /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets + /// array. In conjunction with the previous method, this can be used to + /// determine whether an insertion caused the DenseMap to reallocate. + const void *getPointerIntoBucketsArray() const { return getBuckets(); } + +protected: + DenseMapBase() = default; + + void destroyAll() { + if (getNumBuckets() == 0) // Nothing to do. + return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) + P->getSecond().~ValueT(); + P->getFirst().~KeyT(); + } + } + + void initEmpty() { + setNumEntries(0); + setNumTombstones(0); + + assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && + "# initial buckets must be a power of two!"); + const KeyT EmptyKey = getEmptyKey(); + for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) + ::new (&B->getFirst()) KeyT(EmptyKey); + } + + /// Returns the number of buckets to allocate to ensure that the DenseMap can + /// accommodate \p NumEntries without need to grow(). + unsigned getMinBucketToReserveForEntries(unsigned NumEntries) { + // Ensure that "NumEntries * 4 < NumBuckets * 3" + if (NumEntries == 0) + return 0; + // +1 is required because of the strict equality. + // For example if NumEntries is 48, we need to return 401. + return NextPowerOf2(NumEntries * 4 / 3 + 1); + } + + void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { + initEmpty(); + + // Insert all the old elements. + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { + if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); + (void)FoundVal; // silence warning. + assert(!FoundVal && "Key already in new map?"); + DestBucket->getFirst() = std::move(B->getFirst()); + ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond())); + incrementNumEntries(); + + // Free the value. 
+ B->getSecond().~ValueT(); + } + B->getFirst().~KeyT(); + } + } + + template + void copyFrom( + const DenseMapBase &other) { + assert(&other != this); + assert(getNumBuckets() == other.getNumBuckets()); + + setNumEntries(other.getNumEntries()); + setNumTombstones(other.getNumTombstones()); + + if (is_trivially_copyable::value && + is_trivially_copyable::value) + memcpy(reinterpret_cast(getBuckets()), other.getBuckets(), + getNumBuckets() * sizeof(BucketT)); + else + for (size_t i = 0; i < getNumBuckets(); ++i) { + ::new (&getBuckets()[i].getFirst()) + KeyT(other.getBuckets()[i].getFirst()); + if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && + !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) + ::new (&getBuckets()[i].getSecond()) + ValueT(other.getBuckets()[i].getSecond()); + } + } + + static unsigned getHashValue(const KeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + template + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + + static const KeyT getEmptyKey() { + static_assert(std::is_base_of::value, + "Must pass the derived type to this template!"); + return KeyInfoT::getEmptyKey(); + } + + static const KeyT getTombstoneKey() { + return KeyInfoT::getTombstoneKey(); + } + +private: + iterator makeIterator(BucketT *P, BucketT *E, bool NoAdvance=false) { + return iterator(P, E, NoAdvance); + } + + const_iterator makeConstIterator(const BucketT *P, const BucketT *E, + const bool NoAdvance=false) const { + return const_iterator(P, E, NoAdvance); + } + + unsigned getNumEntries() const { + return static_cast(this)->getNumEntries(); + } + + void setNumEntries(unsigned Num) { + static_cast(this)->setNumEntries(Num); + } + + void incrementNumEntries() { + setNumEntries(getNumEntries() + 1); + } + + void decrementNumEntries() { + setNumEntries(getNumEntries() - 1); + } + + unsigned getNumTombstones() const { + return static_cast(this)->getNumTombstones(); + } + + void setNumTombstones(unsigned Num) { + static_cast(this)->setNumTombstones(Num); + } + + void incrementNumTombstones() { + setNumTombstones(getNumTombstones() + 1); + } + + void decrementNumTombstones() { + setNumTombstones(getNumTombstones() - 1); + } + + const BucketT *getBuckets() const { + return static_cast(this)->getBuckets(); + } + + BucketT *getBuckets() { + return static_cast(this)->getBuckets(); + } + + unsigned getNumBuckets() const { + return static_cast(this)->getNumBuckets(); + } + + BucketT *getBucketsEnd() { + return getBuckets() + getNumBuckets(); + } + + const BucketT *getBucketsEnd() const { + return getBuckets() + getNumBuckets(); + } + + void grow(unsigned AtLeast) { + static_cast(this)->grow(AtLeast); + } + + void shrink_and_clear() { + static_cast(this)->shrink_and_clear(); + } + + template + BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key, + ValueArgs &&... 
Values) { + TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket); + + TheBucket->getFirst() = std::forward(Key); + ::new (&TheBucket->getSecond()) ValueT(std::forward(Values)...); + return TheBucket; + } + + template + BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key, + ValueT &&Value, LookupKeyT &Lookup) { + TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket); + + TheBucket->getFirst() = std::move(Key); + ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); + return TheBucket; + } + + template + BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup, + BucketT *TheBucket) { + // If the load of the hash table is more than 3/4, or if fewer than 1/8 of + // the buckets are empty (meaning that many are filled with tombstones), + // grow the table. + // + // The later case is tricky. For example, if we had one empty bucket with + // tons of tombstones, failing lookups (e.g. for insertion) would have to + // probe almost the entire table until it found the empty bucket. If the + // table completely filled with tombstones, no lookup would ever succeed, + // causing infinite loops in lookup. + unsigned NewNumEntries = getNumEntries() + 1; + unsigned NumBuckets = getNumBuckets(); + if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) { + this->grow(NumBuckets * 2); + LookupBucketFor(Lookup, TheBucket); + NumBuckets = getNumBuckets(); + } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <= + NumBuckets/8)) { + this->grow(NumBuckets); + LookupBucketFor(Lookup, TheBucket); + } + assert(TheBucket); + + // Only update the state after we've grown our bucket space appropriately + // so that when growing buckets we have self-consistent entry count. + incrementNumEntries(); + + // If we are writing over a tombstone, remember this. + const KeyT EmptyKey = getEmptyKey(); + if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey)) + decrementNumTombstones(); + + return TheBucket; + } + + /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in + /// FoundBucket. If the bucket contains the key and a value, this returns + /// true, otherwise it returns a bucket with an empty marker or tombstone and + /// returns false. + template + bool LookupBucketFor(const LookupKeyT &Val, + const BucketT *&FoundBucket) const { + const BucketT *BucketsPtr = getBuckets(); + const unsigned NumBuckets = getNumBuckets(); + + if (NumBuckets == 0) { + FoundBucket = nullptr; + return false; + } + + // FoundTombstone - Keep track of whether we find a tombstone while probing. + const BucketT *FoundTombstone = nullptr; + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + assert(!KeyInfoT::isEqual(Val, EmptyKey) && + !KeyInfoT::isEqual(Val, TombstoneKey) && + "Empty/Tombstone value shouldn't be inserted into map!"); + + unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); + unsigned ProbeAmt = 1; + while (true) { + const BucketT *ThisBucket = BucketsPtr + BucketNo; + // Found Val's bucket? If so, return it. + if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) { + FoundBucket = ThisBucket; + return true; + } + + // If we found an empty bucket, the key doesn't exist in the set. + // Insert it and return the default value. + if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) { + // If we've already seen a tombstone while probing, fill it in instead + // of the empty bucket we eventually probed to. + FoundBucket = FoundTombstone ? 
FoundTombstone : ThisBucket; + return false; + } + + // If this is a tombstone, remember it. If Val ends up not in the map, we + // prefer to return it than something that would require more probing. + if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) && + !FoundTombstone) + FoundTombstone = ThisBucket; // Remember the first tombstone found. + + // Otherwise, it's a hash collision or a tombstone, continue quadratic + // probing. + BucketNo += ProbeAmt++; + BucketNo &= (NumBuckets-1); + } + } + + template + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { + const BucketT *ConstFoundBucket; + bool Result = const_cast(this) + ->LookupBucketFor(Val, ConstFoundBucket); + FoundBucket = const_cast(ConstFoundBucket); + return Result; + } + +public: + /// Return the approximate size (in bytes) of the actual map. + /// This is just the raw memory used by DenseMap. + /// If entries are pointers to objects, the size of the referenced objects + /// are not included. + size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } +}; + +/// Equality comparison for DenseMap. +/// +/// Iterates over elements of LHS confirming that each (key, value) pair in LHS +/// is also in RHS, and that no additional pairs are in RHS. +/// Equivalent to N calls to RHS.find and N value comparisons. Amortized +/// complexity is linear, worst case is O(N^2) (if every hash collides). +template +bool operator==( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + if (LHS.size() != RHS.size()) + return false; + + for (auto &KV : LHS) { + auto I = RHS.find(KV.first); + if (I == RHS.end() || I->second != KV.second) + return false; + } + + return true; +} + +/// Inequality comparison for DenseMap. +/// +/// Equivalent to !(LHS == RHS). See operator== for performance notes. +template +bool operator!=( + const DenseMapBase &LHS, + const DenseMapBase &RHS) { + return !(LHS == RHS); +} + +template , + typename BucketT = llvm::detail::DenseMapPair> +class DenseMap : public DenseMapBase, + KeyT, ValueT, KeyInfoT, BucketT> { + friend class DenseMapBase; + + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. 
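+  // (CRTP: DenseMapBase calls back into this derived class through
+  // getBuckets()/getNumEntries()/grow() and friends for the actual storage.)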
+ using BaseT = DenseMapBase; + + BucketT *Buckets; + unsigned NumEntries; + unsigned NumTombstones; + unsigned NumBuckets; + +public: + /// Create a DenseMap wth an optional \p InitialReserve that guarantee that + /// this number of elements can be inserted in the map without grow() + explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); } + + DenseMap(const DenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + DenseMap(DenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + DenseMap(const InputIt &I, const InputIt &E) { + init(std::distance(I, E)); + this->insert(I, E); + } + + DenseMap(std::initializer_list Vals) { + init(Vals.size()); + this->insert(Vals.begin(), Vals.end()); + } + + ~DenseMap() { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + } + + void swap(DenseMap& RHS) { + std::swap(Buckets, RHS.Buckets); + std::swap(NumEntries, RHS.NumEntries); + std::swap(NumTombstones, RHS.NumTombstones); + std::swap(NumBuckets, RHS.NumBuckets); + } + + DenseMap& operator=(const DenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + DenseMap& operator=(DenseMap &&other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + init(0); + swap(other); + return *this; + } + + void copyFrom(const DenseMap& other) { + this->destroyAll(); + deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT)); + if (allocateBuckets(other.NumBuckets)) { + this->BaseT::copyFrom(other); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void init(unsigned InitNumEntries) { + auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); + if (allocateBuckets(InitBuckets)) { + this->BaseT::initEmpty(); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void grow(unsigned AtLeast) { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + allocateBuckets(std::max(64, static_cast(NextPowerOf2(AtLeast-1)))); + assert(Buckets); + if (!OldBuckets) { + this->BaseT::initEmpty(); + return; + } + + this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); + + // Free the old table. + deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets, + alignof(BucketT)); + } + + void shrink_and_clear() { + unsigned OldNumBuckets = NumBuckets; + unsigned OldNumEntries = NumEntries; + this->destroyAll(); + + // Reduce the number of buckets. 
+ unsigned NewNumBuckets = 0; + if (OldNumEntries) + NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); + if (NewNumBuckets == NumBuckets) { + this->BaseT::initEmpty(); + return; + } + + deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets, + alignof(BucketT)); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + + void setNumEntries(unsigned Num) { + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + BucketT *getBuckets() const { + return Buckets; + } + + unsigned getNumBuckets() const { + return NumBuckets; + } + + bool allocateBuckets(unsigned Num) { + NumBuckets = Num; + if (NumBuckets == 0) { + Buckets = nullptr; + return false; + } + + Buckets = static_cast( + allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT))); + return true; + } +}; + +template , + typename BucketT = llvm::detail::DenseMapPair> +class SmallDenseMap + : public DenseMapBase< + SmallDenseMap, KeyT, + ValueT, KeyInfoT, BucketT> { + friend class DenseMapBase; + + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + using BaseT = DenseMapBase; + + static_assert(isPowerOf2_64(InlineBuckets), + "InlineBuckets must be a power of 2."); + + unsigned Small : 1; + unsigned NumEntries : 31; + unsigned NumTombstones; + + struct LargeRep { + BucketT *Buckets; + unsigned NumBuckets; + }; + + /// A "union" of an inline bucket array and the struct representing + /// a large bucket. This union will be discriminated by the 'Small' bit. + AlignedCharArrayUnion storage; + +public: + explicit SmallDenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + SmallDenseMap(const SmallDenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + SmallDenseMap(SmallDenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + SmallDenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~SmallDenseMap() { + this->destroyAll(); + deallocateBuckets(); + } + + void swap(SmallDenseMap& RHS) { + unsigned TmpNumEntries = RHS.NumEntries; + RHS.NumEntries = NumEntries; + NumEntries = TmpNumEntries; + std::swap(NumTombstones, RHS.NumTombstones); + + const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + if (Small && RHS.Small) { + // If we're swapping inline bucket arrays, we have to cope with some of + // the tricky bits of DenseMap's storage system: the buckets are not + // fully initialized. Thus we swap every key, but we may have + // a one-directional move of the value. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *LHSB = &getInlineBuckets()[i], + *RHSB = &RHS.getInlineBuckets()[i]; + bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey)); + bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey)); + if (hasLHSValue && hasRHSValue) { + // Swap together if we can... + std::swap(*LHSB, *RHSB); + continue; + } + // Swap separately and handle any assymetry. 
+ std::swap(LHSB->getFirst(), RHSB->getFirst()); + if (hasLHSValue) { + ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond())); + LHSB->getSecond().~ValueT(); + } else if (hasRHSValue) { + ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond())); + RHSB->getSecond().~ValueT(); + } + } + return; + } + if (!Small && !RHS.Small) { + std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); + std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); + return; + } + + SmallDenseMap &SmallSide = Small ? *this : RHS; + SmallDenseMap &LargeSide = Small ? RHS : *this; + + // First stash the large side's rep and move the small side across. + LargeRep TmpRep = std::move(*LargeSide.getLargeRep()); + LargeSide.getLargeRep()->~LargeRep(); + LargeSide.Small = true; + // This is similar to the standard move-from-old-buckets, but the bucket + // count hasn't actually rotated in this case. So we have to carefully + // move construct the keys and values into their new locations, but there + // is no need to re-hash things. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *NewB = &LargeSide.getInlineBuckets()[i], + *OldB = &SmallSide.getInlineBuckets()[i]; + ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst())); + OldB->getFirst().~KeyT(); + if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) { + ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond())); + OldB->getSecond().~ValueT(); + } + } + + // The hard part of moving the small buckets across is done, just move + // the TmpRep into its new home. + SmallSide.Small = false; + new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep)); + } + + SmallDenseMap& operator=(const SmallDenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + SmallDenseMap& operator=(SmallDenseMap &&other) { + this->destroyAll(); + deallocateBuckets(); + init(0); + swap(other); + return *this; + } + + void copyFrom(const SmallDenseMap& other) { + this->destroyAll(); + deallocateBuckets(); + Small = true; + if (other.getNumBuckets() > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); + } + this->BaseT::copyFrom(other); + } + + void init(unsigned InitBuckets) { + Small = true; + if (InitBuckets > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); + } + this->BaseT::initEmpty(); + } + + void grow(unsigned AtLeast) { + if (AtLeast > InlineBuckets) + AtLeast = std::max(64, NextPowerOf2(AtLeast-1)); + + if (Small) { + // First move the inline buckets into a temporary storage. + AlignedCharArrayUnion TmpStorage; + BucketT *TmpBegin = reinterpret_cast(TmpStorage.buffer); + BucketT *TmpEnd = TmpBegin; + + // Loop over the buckets, moving non-empty, non-tombstones into the + // temporary storage. Have the loop move the TmpEnd forward as it goes. 
+ const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && + "Too many inline buckets!"); + ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst())); + ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond())); + ++TmpEnd; + P->getSecond().~ValueT(); + } + P->getFirst().~KeyT(); + } + + // AtLeast == InlineBuckets can happen if there are many tombstones, + // and grow() is used to remove them. Usually we always switch to the + // large rep here. + if (AtLeast > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + this->moveFromOldBuckets(TmpBegin, TmpEnd); + return; + } + + LargeRep OldRep = std::move(*getLargeRep()); + getLargeRep()->~LargeRep(); + if (AtLeast <= InlineBuckets) { + Small = true; + } else { + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + + this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); + + // Free the old table. + deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets, + alignof(BucketT)); + } + + void shrink_and_clear() { + unsigned OldSize = this->size(); + this->destroyAll(); + + // Reduce the number of buckets. + unsigned NewNumBuckets = 0; + if (OldSize) { + NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); + if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) + NewNumBuckets = 64; + } + if ((Small && NewNumBuckets <= InlineBuckets) || + (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { + this->BaseT::initEmpty(); + return; + } + + deallocateBuckets(); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + + void setNumEntries(unsigned Num) { + // NumEntries is hardcoded to be 31 bits wide. + assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries"); + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + const BucketT *getInlineBuckets() const { + assert(Small); + // Note that this cast does not violate aliasing rules as we assert that + // the memory's dynamic type is the small, inline bucket buffer, and the + // 'storage.buffer' static type is 'char *'. + return reinterpret_cast(storage.buffer); + } + + BucketT *getInlineBuckets() { + return const_cast( + const_cast(this)->getInlineBuckets()); + } + + const LargeRep *getLargeRep() const { + assert(!Small); + // Note, same rule about aliasing as with getInlineBuckets. + return reinterpret_cast(storage.buffer); + } + + LargeRep *getLargeRep() { + return const_cast( + const_cast(this)->getLargeRep()); + } + + const BucketT *getBuckets() const { + return Small ? getInlineBuckets() : getLargeRep()->Buckets; + } + + BucketT *getBuckets() { + return const_cast( + const_cast(this)->getBuckets()); + } + + unsigned getNumBuckets() const { + return Small ? 
InlineBuckets : getLargeRep()->NumBuckets; + } + + void deallocateBuckets() { + if (Small) + return; + + deallocate_buffer(getLargeRep()->Buckets, + sizeof(BucketT) * getLargeRep()->NumBuckets, + alignof(BucketT)); + getLargeRep()->~LargeRep(); + } + + LargeRep allocateBuckets(unsigned Num) { + assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); + LargeRep Rep = {static_cast(allocate_buffer( + sizeof(BucketT) * Num, alignof(BucketT))), + Num}; + return Rep; + } +}; + +template +class DenseMapIterator { + friend class DenseMapIterator; + friend class DenseMapIterator; + + using ConstIterator = DenseMapIterator; + +public: + using difference_type = ptrdiff_t; + using value_type = + typename std::conditional::type; + using pointer = value_type *; + using reference = value_type &; + using iterator_category = std::forward_iterator_tag; + +private: + pointer Ptr = nullptr; + pointer End = nullptr; + +public: + DenseMapIterator() = default; + + DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) + : Ptr(Pos), End(E) { + if (NoAdvance) return; + AdvancePastEmptyBuckets(); + } + + // Converting ctor from non-const iterators to const iterators. SFINAE'd out + // for const iterator destinations so it doesn't end up as a user defined copy + // constructor. + template > + DenseMapIterator( + const DenseMapIterator &I) + : Ptr(I.Ptr), End(I.End) {} + + reference operator*() const { + assert(Ptr != End && "dereferencing end() iterator"); + return *Ptr; + } + pointer operator->() const { + assert(Ptr != End && "dereferencing end() iterator"); + return Ptr; + } + + bool operator==(const ConstIterator &RHS) const { + return Ptr == RHS.Ptr; + } + bool operator!=(const ConstIterator &RHS) const { + return Ptr != RHS.Ptr; + } + + inline DenseMapIterator& operator++() { // Preincrement + assert(Ptr != End && "incrementing end() iterator"); + ++Ptr; + AdvancePastEmptyBuckets(); + return *this; + } + DenseMapIterator operator++(int) { // Postincrement + DenseMapIterator tmp = *this; ++*this; return tmp; + } + +private: + void AdvancePastEmptyBuckets() { + assert(Ptr <= End); + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) || + KeyInfoT::isEqual(Ptr->getFirst(), Tombstone))) + ++Ptr; + } + + void RetreatPastEmptyBuckets() { + assert(Ptr >= End); + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) || + KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone))) + --Ptr; + } +}; + +template +inline size_t capacity_in_bytes(const DenseMap &X) { + return X.getMemorySize(); +} + +} // end namespace llvm +}} // namespace swift::runtime + +#endif // LLVM_ADT_DENSEMAP_H diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMapInfo.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMapInfo.h new file mode 100644 index 00000000..f648fb3d --- /dev/null +++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseMapInfo.h @@ -0,0 +1,354 @@ +//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines DenseMapInfo traits for DenseMap. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_DENSEMAPINFO_H +#define LLVM_ADT_DENSEMAPINFO_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/StringRef.h" +#include +#include +#include +#include + +inline namespace __swift { inline namespace __runtime { +namespace llvm { + +namespace detail { + +/// Simplistic combination of 32-bit hash values into 32-bit hash values. +static inline unsigned combineHashValue(unsigned a, unsigned b) { + uint64_t key = (uint64_t)a << 32 | (uint64_t)b; + key += ~(key << 32); + key ^= (key >> 22); + key += ~(key << 13); + key ^= (key >> 8); + key += (key << 3); + key ^= (key >> 15); + key += ~(key << 27); + key ^= (key >> 31); + return (unsigned)key; +} + +} // end namespace detail + +template +struct DenseMapInfo { + //static inline T getEmptyKey(); + //static inline T getTombstoneKey(); + //static unsigned getHashValue(const T &Val); + //static bool isEqual(const T &LHS, const T &RHS); +}; + +// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values +// that are aligned to alignof(T) bytes, but try to avoid requiring T to be +// complete. This allows clients to instantiate DenseMap with forward +// declared key types. Assume that no pointer key type requires more than 4096 +// bytes of alignment. +template +struct DenseMapInfo { + // The following should hold, but it would require T to be complete: + // static_assert(alignof(T) <= (1 << Log2MaxAlign), + // "DenseMap does not support pointer keys requiring more than " + // "Log2MaxAlign bits of alignment"); + static constexpr uintptr_t Log2MaxAlign = 12; + + static inline T* getEmptyKey() { + uintptr_t Val = static_cast(-1); + Val <<= Log2MaxAlign; + return reinterpret_cast(Val); + } + + static inline T* getTombstoneKey() { + uintptr_t Val = static_cast(-2); + Val <<= Log2MaxAlign; + return reinterpret_cast(Val); + } + + static unsigned getHashValue(const T *PtrVal) { + return (unsigned((uintptr_t)PtrVal) >> 4) ^ + (unsigned((uintptr_t)PtrVal) >> 9); + } + + static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } +}; + +// Provide DenseMapInfo for chars. +template<> struct DenseMapInfo { + static inline char getEmptyKey() { return ~0; } + static inline char getTombstoneKey() { return ~0 - 1; } + static unsigned getHashValue(const char& Val) { return Val * 37U; } + + static bool isEqual(const char &LHS, const char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned chars. +template <> struct DenseMapInfo { + static inline unsigned char getEmptyKey() { return ~0; } + static inline unsigned char getTombstoneKey() { return ~0 - 1; } + static unsigned getHashValue(const unsigned char &Val) { return Val * 37U; } + + static bool isEqual(const unsigned char &LHS, const unsigned char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned shorts. 
+template <> struct DenseMapInfo { + static inline unsigned short getEmptyKey() { return 0xFFFF; } + static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; } + static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; } + + static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned ints. +template<> struct DenseMapInfo { + static inline unsigned getEmptyKey() { return ~0U; } + static inline unsigned getTombstoneKey() { return ~0U - 1; } + static unsigned getHashValue(const unsigned& Val) { return Val * 37U; } + + static bool isEqual(const unsigned& LHS, const unsigned& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned longs. +template<> struct DenseMapInfo { + static inline unsigned long getEmptyKey() { return ~0UL; } + static inline unsigned long getTombstoneKey() { return ~0UL - 1L; } + + static unsigned getHashValue(const unsigned long& Val) { + return (unsigned)(Val * 37UL); + } + + static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned long longs. +template<> struct DenseMapInfo { + static inline unsigned long long getEmptyKey() { return ~0ULL; } + static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; } + + static unsigned getHashValue(const unsigned long long& Val) { + return (unsigned)(Val * 37ULL); + } + + static bool isEqual(const unsigned long long& LHS, + const unsigned long long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for shorts. +template <> struct DenseMapInfo { + static inline short getEmptyKey() { return 0x7FFF; } + static inline short getTombstoneKey() { return -0x7FFF - 1; } + static unsigned getHashValue(const short &Val) { return Val * 37U; } + static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; } +}; + +// Provide DenseMapInfo for ints. +template<> struct DenseMapInfo { + static inline int getEmptyKey() { return 0x7fffffff; } + static inline int getTombstoneKey() { return -0x7fffffff - 1; } + static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); } + + static bool isEqual(const int& LHS, const int& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for longs. +template<> struct DenseMapInfo { + static inline long getEmptyKey() { + return (1UL << (sizeof(long) * 8 - 1)) - 1UL; + } + + static inline long getTombstoneKey() { return getEmptyKey() - 1L; } + + static unsigned getHashValue(const long& Val) { + return (unsigned)(Val * 37UL); + } + + static bool isEqual(const long& LHS, const long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for long longs. +template<> struct DenseMapInfo { + static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; } + static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; } + + static unsigned getHashValue(const long long& Val) { + return (unsigned)(Val * 37ULL); + } + + static bool isEqual(const long long& LHS, + const long long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for all pairs whose members have info. 
+template +struct DenseMapInfo> { + using Pair = std::pair; + using FirstInfo = DenseMapInfo; + using SecondInfo = DenseMapInfo; + + static inline Pair getEmptyKey() { + return std::make_pair(FirstInfo::getEmptyKey(), + SecondInfo::getEmptyKey()); + } + + static inline Pair getTombstoneKey() { + return std::make_pair(FirstInfo::getTombstoneKey(), + SecondInfo::getTombstoneKey()); + } + + static unsigned getHashValue(const Pair& PairVal) { + return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first), + SecondInfo::getHashValue(PairVal.second)); + } + + static bool isEqual(const Pair &LHS, const Pair &RHS) { + return FirstInfo::isEqual(LHS.first, RHS.first) && + SecondInfo::isEqual(LHS.second, RHS.second); + } +}; + +// Provide DenseMapInfo for all tuples whose members have info. +template struct DenseMapInfo> { + using Tuple = std::tuple; + + static inline Tuple getEmptyKey() { + return Tuple(DenseMapInfo::getEmptyKey()...); + } + + static inline Tuple getTombstoneKey() { + return Tuple(DenseMapInfo::getTombstoneKey()...); + } + + template + static unsigned getHashValueImpl(const Tuple &values, std::false_type) { + using EltType = typename std::tuple_element::type; + std::integral_constant atEnd; + return detail::combineHashValue( + DenseMapInfo::getHashValue(std::get(values)), + getHashValueImpl(values, atEnd)); + } + + template + static unsigned getHashValueImpl(const Tuple &values, std::true_type) { + return 0; + } + + static unsigned getHashValue(const std::tuple &values) { + std::integral_constant atEnd; + return getHashValueImpl<0>(values, atEnd); + } + + template + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) { + using EltType = typename std::tuple_element::type; + std::integral_constant atEnd; + return DenseMapInfo::isEqual(std::get(lhs), std::get(rhs)) && + isEqualImpl(lhs, rhs, atEnd); + } + + template + static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::true_type) { + return true; + } + + static bool isEqual(const Tuple &lhs, const Tuple &rhs) { + std::integral_constant atEnd; + return isEqualImpl<0>(lhs, rhs, atEnd); + } +}; + +// Provide DenseMapInfo for StringRefs. +template <> struct DenseMapInfo { + static inline StringRef getEmptyKey() { + return StringRef(reinterpret_cast(~static_cast(0)), + 0); + } + + static inline StringRef getTombstoneKey() { + return StringRef(reinterpret_cast(~static_cast(1)), + 0); + } + + static unsigned getHashValue(StringRef Val) { + assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!"); + assert(Val.data() != getTombstoneKey().data() && + "Cannot hash the tombstone key!"); + return (unsigned)(hash_value(Val)); + } + + static bool isEqual(StringRef LHS, StringRef RHS) { + if (RHS.data() == getEmptyKey().data()) + return LHS.data() == getEmptyKey().data(); + if (RHS.data() == getTombstoneKey().data()) + return LHS.data() == getTombstoneKey().data(); + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for ArrayRefs. 
+template <typename T> struct DenseMapInfo<ArrayRef<T>> {
+  static inline ArrayRef<T> getEmptyKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
+                       size_t(0));
+  }
+
+  static inline ArrayRef<T> getTombstoneKey() {
+    return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
+                       size_t(0));
+  }
+
+  static unsigned getHashValue(ArrayRef<T> Val) {
+    assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+    assert(Val.data() != getTombstoneKey().data() &&
+           "Cannot hash the tombstone key!");
+    return (unsigned)(hash_value(Val));
+  }
+
+  static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+    if (RHS.data() == getEmptyKey().data())
+      return LHS.data() == getEmptyKey().data();
+    if (RHS.data() == getTombstoneKey().data())
+      return LHS.data() == getTombstoneKey().data();
+    return LHS == RHS;
+  }
+};
+
+template <> struct DenseMapInfo<hash_code> {
+  static inline hash_code getEmptyKey() { return hash_code(-1); }
+  static inline hash_code getTombstoneKey() { return hash_code(-2); }
+  static unsigned getHashValue(hash_code val) { return val; }
+  static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
+};
+
+} // end namespace llvm
+}} // namespace swift::runtime
+
+#endif // LLVM_ADT_DENSEMAPINFO_H
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseSet.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseSet.h
new file mode 100644
index 00000000..ae8109f3
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/DenseSet.h
@@ -0,0 +1,291 @@
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseSet and SmallDenseSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+namespace detail {
+
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+  KeyT key;
+
+public:
+  KeyT &getFirst() { return key; }
+  const KeyT &getFirst() const { return key; }
+  DenseSetEmpty &getSecond() { return *this; }
+  const DenseSetEmpty &getSecond() const { return *this; }
+};
+
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+///   DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+///            detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type. ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
+  static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+                "DenseMap buckets unexpectedly large!");
+  MapTy TheMap;
+
+  template <typename T>
+  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+  using key_type = ValueT;
+  using value_type = ValueT;
+  using size_type = unsigned;
+
+  explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+  template <typename InputIt>
+  DenseSetImpl(const InputIt &I, const InputIt &E)
+      : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) {
+    insert(I, E);
+  }
+
+  DenseSetImpl(std::initializer_list<ValueT> Elems)
+      : DenseSetImpl(PowerOf2Ceil(Elems.size())) {
+    insert(Elems.begin(), Elems.end());
+  }
+
+  bool empty() const { return TheMap.empty(); }
+  size_type size() const { return TheMap.size(); }
+  size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+  /// Grow the DenseSet so that it has at least Size buckets. Will not shrink
+  /// the Size of the set.
+  void resize(size_t Size) { TheMap.resize(Size); }
+
+  /// Grow the DenseSet so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_t Size) { TheMap.reserve(Size); }
+
+  void clear() {
+    TheMap.clear();
+  }
+
+  /// Return 1 if the specified key is in the set, 0 otherwise.
+  size_type count(const_arg_type_t<ValueT> V) const {
+    return TheMap.count(V);
+  }
+
+  bool erase(const ValueT &V) {
+    return TheMap.erase(V);
+  }
+
+  void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
+
+  // Iterators.
+
+  class ConstIterator;
+
+  class Iterator {
+    typename MapTy::iterator I;
+    friend class DenseSetImpl;
+    friend class ConstIterator;
+
+  public:
+    using difference_type = typename MapTy::iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = value_type *;
+    using reference = value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    Iterator() = default;
+    Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+    ValueT &operator*() { return I->getFirst(); }
+    const ValueT &operator*() const { return I->getFirst(); }
+    ValueT *operator->() { return &I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    Iterator& operator++() { ++I; return *this; }
+    Iterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  class ConstIterator {
+    typename MapTy::const_iterator I;
+    friend class DenseSetImpl;
+    friend class Iterator;
+
+  public:
+    using difference_type = typename MapTy::const_iterator::difference_type;
+    using value_type = ValueT;
+    using pointer = const value_type *;
+    using reference = const value_type &;
+    using iterator_category = std::forward_iterator_tag;
+
+    ConstIterator() = default;
+    ConstIterator(const Iterator &B) : I(B.I) {}
+    ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+    const ValueT &operator*() const { return I->getFirst(); }
+    const ValueT *operator->() const { return &I->getFirst(); }
+
+    ConstIterator& operator++() { ++I; return *this; }
+    ConstIterator operator++(int) { auto T = *this; ++I; return T; }
+    bool operator==(const ConstIterator& X) const { return I == X.I; }
+    bool operator!=(const ConstIterator& X) const { return I != X.I; }
+  };
+
+  using iterator = Iterator;
+  using const_iterator = ConstIterator;
+
+  iterator begin() { return Iterator(TheMap.begin()); }
+  iterator end() { return Iterator(TheMap.end()); }
+
+  const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+  const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+  iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
+  const_iterator find(const_arg_type_t<ValueT> V) const {
+    return ConstIterator(TheMap.find(V));
+  }
+
+  /// Alternative version of find() which allows a different, and possibly less
+  /// expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+  /// used.
+  template <class LookupKeyT>
+  iterator find_as(const LookupKeyT &Val) {
+    return Iterator(TheMap.find_as(Val));
+  }
+  template <class LookupKeyT>
+  const_iterator find_as(const LookupKeyT &Val) const {
+    return ConstIterator(TheMap.find_as(Val));
+  }
+
+  void erase(Iterator I) { return TheMap.erase(I.I); }
+  void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+  std::pair<iterator, bool> insert(const ValueT &V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(V, Empty);
+  }
+
+  std::pair<iterator, bool> insert(ValueT &&V) {
+    detail::DenseSetEmpty Empty;
+    return TheMap.try_emplace(std::move(V), Empty);
+  }
+
+  /// Alternative version of insert that uses a different (and possibly less
+  /// expensive) key type.
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(const ValueT &V,
+                                      const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
+  }
+  template <typename LookupKeyT>
+  std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
+    return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
+  }
+
+  // Range insertion of values.
+  template <typename InputIt>
+  void insert(InputIt I, InputIt E) {
+    for (; I != E; ++I)
+      insert(*I);
+  }
+};
+
+/// Equality comparison for DenseSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst
+/// case is O(N^2) (if every hash collides).
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator==(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+                const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+  if (LHS.size() != RHS.size())
+    return false;
+
+  for (auto &E : LHS)
+    if (!RHS.count(E))
+      return false;
+
+  return true;
+}
+
+/// Inequality comparison for DenseSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator!=(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+                const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+  return !(LHS == RHS);
+}
+
+} // end namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+                     ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                      detail::DenseSetPair<ValueT>>,
+                     ValueInfoT> {
+  using BaseT =
+      detail::DenseSetImpl<ValueT,
+                           DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+                                    detail::DenseSetPair<ValueT>>,
+                           ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
+
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+          typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+    : public detail::DenseSetImpl<
+          ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                                ValueInfoT, detail::DenseSetPair<ValueT>>,
+          ValueInfoT> {
+  using BaseT = detail::DenseSetImpl<
+      ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+                            ValueInfoT, detail::DenseSetPair<ValueT>>,
+      ValueInfoT>;
+
+public:
+  using BaseT::BaseT;
+};
+
+} // end namespace llvm
+}} // namespace swift::runtime
+
+#endif // LLVM_ADT_DENSESET_H
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Hashing.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Hashing.h
new file mode 100644
index 00000000..7418fd01
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Hashing.h
@@ -0,0 +1,659 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided are comprised of one type and three functions:
+//
+//  -- 'hash_code' class is an opaque type representing the hash code for some
+//     data. It is the intended product of hashing, and can be used to implement
+//     hash tables, checksumming, and other common uses of hashes. It is not an
+//     integer type (although it can be converted to one) because it is risky
+//     to assume much about the internals of a hash_code. In particular, each
+//     execution of the program has a high probability of producing a different
+//     hash_code for a given input. Thus their values are not stable to save or
+//     persist, and should only be used during the execution for the
+//     construction of hashing datastructures.
+//
+//  -- 'hash_value' is a function designed to be overloaded for each
+//     user-defined type which wishes to be used within a hashing context. It
+//     should be overloaded within the user-defined type's namespace and found
+//     via ADL. Overloads for primitive types are provided by this library.
+//
+//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+//     programmers in easily and intuitively combining a set of data into
+//     a single hash_code for their object. They should only logically be used
+//     within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast,
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were
+// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
+// under 32-bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <string>
+#include <tuple>
+#include <utility>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+/// An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+///   using llvm::hash_value;
+///   llvm::hash_code code = hash_value(x);
+/// \endcode
+class hash_code {
+  size_t value;
+
+public:
+  /// Default construct a hash_code.
+  /// Note that this leaves the value uninitialized.
+  hash_code() = default;
+
+  /// Form a hash code directly from a numerical value.
+  hash_code(size_t value) : value(value) {}
+
+  /// Convert the hash code to its numerical value for use.
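+  /// For example, a hash table might reduce the code to a bucket index
+  /// (a caller-side sketch; NumBuckets is a hypothetical power of two):
+  /// \code
+  ///   size_t bucket = size_t(code) & (NumBuckets - 1);
+  /// \endcode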
+  /*explicit*/ operator size_t() const { return value; }
+
+  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value == rhs.value;
+  }
+  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value != rhs.value;
+  }
+
+  /// Allow a hash_code to be directly run through hash_value.
+  friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicit promote to a common
+/// type without changing the value.
+template <typename T>
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);
+
+/// Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use outside of
+/// immediately on start of a simple program designed for reproducible
+/// behavior.
+void set_fixed_execution_hash_seed(uint64_t fixed_value);
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+  uint64_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+  uint32_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
+  return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
+static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+  // Avoid shifting by 64: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+  return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (low ^ high) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (high ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+  uint8_t a = s[0];
+  uint8_t b = s[len >> 1];
+  uint8_t c = s[len - 1];
+  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
+  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch32(s);
+  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s);
+  uint64_t b = fetch64(s + len - 8);
+  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s) * k1;
+  uint64_t b = fetch64(s + 8);
+  uint64_t c = fetch64(s + len - 8) * k2;
+  uint64_t d = fetch64(s + len - 16) * k0;
+  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+                       a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t z = fetch64(s + 24);
+  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+  uint64_t b = rotate(a + z, 52);
+  uint64_t c = rotate(a, 37);
+  a += fetch64(s + 8);
+  c += rotate(a, 7);
+  a += fetch64(s + 16);
+  uint64_t vf = a + z;
+  uint64_t vs = b + rotate(a, 31) + c;
+  a = fetch64(s + 16) + fetch64(s + len - 32);
+  z = fetch64(s + len - 8);
+  b = rotate(a + z, 52);
+  c = rotate(a, 37);
+  a += fetch64(s + len - 24);
+  c += rotate(a, 7);
+  a += fetch64(s + len - 16);
+  uint64_t wf = a + z;
+  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+  return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+  if (length >= 4 && length <= 8)
+    return hash_4to8_bytes(s, length, seed);
+  if (length > 8 && length <= 16)
+    return hash_9to16_bytes(s, length, seed);
+  if (length > 16 && length <= 32)
+    return hash_17to32_bytes(s, length, seed);
+  if (length > 32)
+    return hash_33to64_bytes(s, length, seed);
+  if (length != 0)
+    return hash_1to3_bytes(s, length, seed);
+
+  return k2 ^ seed;
+}
+
+/// The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+  uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
+
+  /// Create a new hash_state structure and initialize it based on the
+  /// seed and the first 64-byte chunk.
+  /// This effectively performs the initial mix.
+  static hash_state create(const char *s, uint64_t seed) {
+    hash_state state = {
+      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+      seed * k1, shift_mix(seed), 0 };
+    state.h6 = hash_16_bytes(state.h4, state.h5);
+    state.mix(s);
+    return state;
+  }
+
+  /// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
+  /// and 'b', including whatever is already in 'a' and 'b'.
+  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+    a += fetch64(s);
+    uint64_t c = fetch64(s + 24);
+    b = rotate(b + a + c, 21);
+    uint64_t d = a;
+    a += fetch64(s + 8) + fetch64(s + 16);
+    b += rotate(a, 44) + d;
+    a += c;
+  }
+
+  /// Mix in a 64-byte buffer of data.
+  /// We mix all 64 bytes even when the chunk length is smaller, but we
+  /// record the actual length.
+  void mix(const char *s) {
+    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 ^= h6;
+    h1 += h3 + fetch64(s + 40);
+    h2 = rotate(h2 + h5, 33) * k1;
+    h3 = h4 * k1;
+    h4 = h0 + h5;
+    mix_32_bytes(s, h3, h4);
+    h5 = h2 + h6;
+    h6 = h1 + fetch64(s + 16);
+    mix_32_bytes(s + 32, h5, h6);
+    std::swap(h2, h0);
+  }
+
+  /// Compute the final 64-bit hash code value based on the current
+  /// state and the length of bytes hashed.
+  uint64_t finalize(size_t length) {
+    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+  }
+};
+
+
+/// A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern uint64_t fixed_seed_override;
+
+inline uint64_t get_execution_seed() {
+  // FIXME: This needs to be a per-execution seed. This is just a placeholder
+  // implementation. Switching to a per-execution seed is likely to flush out
+  // instability bugs and so will happen as its own commit.
+  //
+  // However, if there is a fixed seed override set the first time this is
+  // called, return that instead of the per-execution seed.
+  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+  static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
+  return seed;
+}
+
+
+/// Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+                                   std::is_pointer<T>::value) &&
+                                  64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+  : std::integral_constant<bool, (is_hashable_data<T>::value &&
+                                  is_hashable_data<U>::value &&
+                                  (sizeof(T) + sizeof(U)) ==
+                                   sizeof(std::pair<T, U>))> {};
+
+/// Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+std::enable_if_t<is_hashable_data<T>::value, T>
+get_hashable_data(const T &value) {
+  return value;
+}
+/// Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+std::enable_if_t<!is_hashable_data<T>::value, size_t>
+get_hashable_data(const T &value) {
+  using ::llvm::hash_value;
+  return hash_value(value);
+}
+
+/// Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+                       size_t offset = 0) {
+  size_t store_size = sizeof(value) - offset;
+  if (buffer_ptr + store_size > buffer_end)
+    return false;
+  const char *value_data = reinterpret_cast<const char *>(&value);
+  memcpy(buffer_ptr, value_data + offset, store_size);
+  buffer_ptr += store_size;
+  return true;
+}
+
+/// Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+  const uint64_t seed = get_execution_seed();
+  char buffer[64], *buffer_ptr = buffer;
+  char *const buffer_end = std::end(buffer);
+  while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                            get_hashable_data(*first)))
+    ++first;
+  if (first == last)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+  assert(buffer_ptr == buffer_end);
+
+  hash_state state = state.create(buffer, seed);
+  size_t length = 64;
+  while (first != last) {
+    // Fill up the buffer. We don't clear it, which re-mixes the last round
+    // when only a partial 64-byte chunk is left.
+    buffer_ptr = buffer;
+    while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                              get_hashable_data(*first)))
+      ++first;
+
+    // Rotate the buffer if we did a partial fill in order to simulate doing
+    // a mix of the last 64-bytes. That is how the algorithm works when we
+    // have a contiguous byte sequence, and we want to emulate that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+  };
+
+  return state.finalize(length);
+}
+
+/// Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
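+///
+/// For instance, a contiguous buffer of integers takes this fast path when
+/// hashed through hash_combine_range (a usage sketch; Values is a
+/// hypothetical caller-side array):
+/// \code
+///   unsigned Values[] = {1, 2, 3, 4};
+///   hash_code H = hash_combine_range(Values, Values + 4);
+/// \endcode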
+template <typename ValueT>
+std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+  const uint64_t seed = get_execution_seed();
+  const char *s_begin = reinterpret_cast<const char *>(first);
+  const char *s_end = reinterpret_cast<const char *>(last);
+  const size_t length = std::distance(s_begin, s_end);
+  if (length <= 64)
+    return hash_short(s_begin, length, seed);
+
+  const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = state.create(s_begin, seed);
+  s_begin += 64;
+  while (s_begin != s_aligned_end) {
+    state.mix(s_begin);
+    s_begin += 64;
+  }
+  if (length & 63)
+    state.mix(s_end - 64);
+
+  return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful at minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+  char buffer[64] = {};
+  hash_state state;
+  const uint64_t seed;
+
+public:
+  /// Construct a recursive hash combining helper.
+  ///
+  /// This sets up the state for a recursive hash combine, including getting
+  /// the seed and buffer setup.
+  hash_combine_recursive_helper()
+    : seed(get_execution_seed()) {}
+
+  /// Combine one chunk of data into the current in-flight hash.
+  ///
+  /// This merges one chunk of data into the hash. First it tries to buffer
+  /// the data. If the buffer is full, it hashes the buffer into its
+  /// hash_state, empties it, and then merges the new chunk in. This also
+  /// handles cases where the data straddles the end of the buffer.
+  template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
+    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+      // Check for skew which prevents the buffer from being packed, and do
+      // a partial store into the buffer to fill it. This is only a concern
+      // with the variadic combine because that formation can have varying
+      // argument types.
+      size_t partial_store_size = buffer_end - buffer_ptr;
+      memcpy(buffer_ptr, &data, partial_store_size);
+
+      // If the store fails, our buffer is full and ready to hash. We have to
+      // either initialize the hash state (on the first full buffer) or mix
+      // this buffer into the existing hash state. Length tracks the *hashed*
+      // length, not the buffered length.
+      if (length == 0) {
+        state = state.create(buffer, seed);
+        length = 64;
+      } else {
+        // Mix this chunk into the current state and bump length up by 64.
+        state.mix(buffer);
+        length += 64;
+      }
+      // Reset the buffer_ptr to the head of the buffer for the next chunk of
+      // data.
+      buffer_ptr = buffer;
+
+      // Try again to store into the buffer -- this cannot fail as we only
+      // store types smaller than the buffer.
+      if (!store_and_advance(buffer_ptr, buffer_end, data,
+                             partial_store_size))
+        llvm_unreachable("buffer smaller than stored type");
+    }
+    return buffer_ptr;
+  }
+
+  /// Recursive, variadic combining method.
+  ///
+  /// This function recurses through each argument, combining that argument
+  /// into a single hash.
+  template <typename T, typename ...Ts>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T &arg, const Ts &...args) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+    // Recurse to the next argument.
+    return combine(length, buffer_ptr, buffer_end, args...);
+  }
+
+  /// Base case for recursive, variadic combining.
+  ///
+  /// The base case when combining arguments recursively is reached when all
+  /// arguments have been handled. It flushes the remaining buffer and
+  /// constructs a hash_code.
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+    // Check whether the entire set of values fit in the buffer. If so, we'll
+    // use the optimized short hashing routine and skip state entirely.
+    if (length == 0)
+      return hash_short(buffer, buffer_ptr - buffer, seed);
+
+    // Mix the final buffer, rotating it if we did a partial fill in order to
+    // simulate doing a mix of the last 64-bytes. That is how the algorithm
+    // works when we have a contiguous byte sequence, and we want to emulate
+    // that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+
+    return state.finalize(length);
+  }
+};
+
+} // namespace detail
+} // namespace hashing
+
+/// Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine, they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+  // Recursively hash each argument using a helper class.
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+  // Similar to hash_4to8_bytes but using a seed instead of length.
+  const uint64_t seed = get_execution_seed();
+  const char *s = reinterpret_cast<const char *>(&value);
+  const uint64_t a = fetch32(s);
+  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
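+//
+// A short sketch of the resulting overload set in action (caller-side code
+// only; the variable names are invented for illustration):
+//
+// \code
+//   using llvm::hash_value;
+//   hash_code A = hash_value(42);                   // integral overload
+//   hash_code B = hash_value(std::make_pair(1, 2)); // pair overload
+// \endcode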
+template <typename T>
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
+  return ::llvm::hashing::detail::hash_integer_value(
+      static_cast<uint64_t>(value));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+  return ::llvm::hashing::detail::hash_integer_value(
+    reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+  return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+  return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+}} // namespace swift::runtime
+
+#endif
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/None.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/None.h
new file mode 100644
index 00000000..a3ca6795
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/None.h
@@ -0,0 +1,28 @@
+//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file provides None, an enumerator for use in implicit constructors
+//  of various (usually templated) types to make such construction more
+//  terse.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NONE_H
+#define LLVM_ADT_NONE_H
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+/// A simple null object to allow implicit construction of Optional<T>
+/// and similar types without having to spell out the specialization's name.
+// (constant value 1 in an attempt to workaround MSVC build issue... )
+enum class NoneType { None = 1 };
+const NoneType None = NoneType::None;
+}
+}} // swift::runtime
+
+#endif
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Optional.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Optional.h
new file mode 100644
index 00000000..729e1bab
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/Optional.h
@@ -0,0 +1,447 @@
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Optional<T>, a template class modeled in the spirit of
+// OCaml's 'opt' variant. The idea is to strongly type whether or not
+// a value can be optional.
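+//
+// A minimal usage sketch (caller-side code; findIndex and use are
+// hypothetical):
+//
+//   Optional<unsigned> findIndex();
+//   if (Optional<unsigned> I = findIndex())
+//     use(*I);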
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <memory>
+#include <new>
+#include <utility>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+class raw_ostream;
+
+namespace optional_detail {
+
+struct in_place_t {};
+
+/// Storage for any type.
+template <typename T, bool = is_trivially_copyable<T>::value>
+class OptionalStorage {
+  union {
+    char empty;
+    T value;
+  };
+  bool hasVal;
+
+public:
+  ~OptionalStorage() { reset(); }
+
+  OptionalStorage() noexcept : empty(), hasVal(false) {}
+
+  OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
+    if (other.hasValue()) {
+      emplace(other.value);
+    }
+  }
+  OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
+    if (other.hasValue()) {
+      emplace(std::move(other.value));
+    }
+  }
+
+  template <class... Args>
+  explicit OptionalStorage(in_place_t, Args &&... args)
+      : value(std::forward<Args>(args)...), hasVal(true) {}
+
+  void reset() noexcept {
+    if (hasVal) {
+      value.~T();
+      hasVal = false;
+    }
+  }
+
+  bool hasValue() const noexcept { return hasVal; }
+
+  T &getValue() LLVM_LVALUE_FUNCTION noexcept {
+    assert(hasVal);
+    return value;
+  }
+  T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+    assert(hasVal);
+    return value;
+  }
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+  T &&getValue() && noexcept {
+    assert(hasVal);
+    return std::move(value);
+  }
+#endif
+
+  template <class... Args> void emplace(Args &&... args) {
+    reset();
+    ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
+    hasVal = true;
+  }
+
+  OptionalStorage &operator=(T const &y) {
+    if (hasValue()) {
+      value = y;
+    } else {
+      ::new ((void *)std::addressof(value)) T(y);
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(T &&y) {
+    if (hasValue()) {
+      value = std::move(y);
+    } else {
+      ::new ((void *)std::addressof(value)) T(std::move(y));
+      hasVal = true;
+    }
+    return *this;
+  }
+
+  OptionalStorage &operator=(OptionalStorage const &other) {
+    if (other.hasValue()) {
+      if (hasValue()) {
+        value = other.value;
+      } else {
+        ::new ((void *)std::addressof(value)) T(other.value);
+        hasVal = true;
+      }
+    } else {
+      reset();
+    }
+    return *this;
+  }
+
+  OptionalStorage &operator=(OptionalStorage &&other) {
+    if (other.hasValue()) {
+      if (hasValue()) {
+        value = std::move(other.value);
+      } else {
+        ::new ((void *)std::addressof(value)) T(std::move(other.value));
+        hasVal = true;
+      }
+    } else {
+      reset();
+    }
+    return *this;
+  }
+};
+
+template <typename T> class OptionalStorage<T, true> {
+  union {
+    char empty;
+    T value;
+  };
+  bool hasVal = false;
+
+public:
+  ~OptionalStorage() = default;
+
+  OptionalStorage() noexcept : empty{} {}
+
+  OptionalStorage(OptionalStorage const &other) = default;
+  OptionalStorage(OptionalStorage &&other) = default;
+
+  OptionalStorage &operator=(OptionalStorage const &other) = default;
+  OptionalStorage &operator=(OptionalStorage &&other) = default;
+
+  template <class... Args>
+  explicit OptionalStorage(in_place_t, Args &&... args)
+      : value(std::forward<Args>(args)...), hasVal(true) {}
+
+  void reset() noexcept {
+    if (hasVal) {
+      value.~T();
+      hasVal = false;
+    }
+  }
+
+  bool hasValue() const noexcept { return hasVal; }
+
+  T &getValue() LLVM_LVALUE_FUNCTION noexcept {
+    assert(hasVal);
+    return value;
+  }
+  T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+    assert(hasVal);
+    return value;
+  }
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+  T &&getValue() && noexcept {
+    assert(hasVal);
+    return std::move(value);
+  }
+#endif
+
+  template <class... Args> void emplace(Args &&... args) {
+    reset();
+    ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
+    hasVal = true;
+  }
+
+  OptionalStorage &operator=(T const &y) {
+    if (hasValue()) {
+      value = y;
+    } else {
+      ::new ((void *)std::addressof(value)) T(y);
+      hasVal = true;
+    }
+    return *this;
+  }
+  OptionalStorage &operator=(T &&y) {
+    if (hasValue()) {
+      value = std::move(y);
+    } else {
+      ::new ((void *)std::addressof(value)) T(std::move(y));
+      hasVal = true;
+    }
+    return *this;
+  }
+};
+
+} // namespace optional_detail
+
+template <typename T> class Optional {
+  optional_detail::OptionalStorage<T> Storage;
+
+public:
+  using value_type = T;
+
+  constexpr Optional() {}
+  constexpr Optional(NoneType) {}
+
+  Optional(const T &y) : Storage(optional_detail::in_place_t{}, y) {}
+  Optional(const Optional &O) = default;
+
+  Optional(T &&y) : Storage(optional_detail::in_place_t{}, std::move(y)) {}
+  Optional(Optional &&O) = default;
+
+  Optional &operator=(T &&y) {
+    Storage = std::move(y);
+    return *this;
+  }
+  Optional &operator=(Optional &&O) = default;
+
+  /// Create a new object by constructing it in place with the given arguments.
+  template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
+    Storage.emplace(std::forward<ArgTypes>(Args)...);
+  }
+
+  static inline Optional create(const T *y) {
+    return y ? Optional(*y) : Optional();
+  }
+
+  Optional &operator=(const T &y) {
+    Storage = y;
+    return *this;
+  }
+  Optional &operator=(const Optional &O) = default;
+
+  void reset() { Storage.reset(); }
+
+  const T *getPointer() const { return &Storage.getValue(); }
+  T *getPointer() { return &Storage.getValue(); }
+  const T &getValue() const LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
+  T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
+
+  explicit operator bool() const { return hasValue(); }
+  bool hasValue() const { return Storage.hasValue(); }
+  const T *operator->() const { return getPointer(); }
+  T *operator->() { return getPointer(); }
+  const T &operator*() const LLVM_LVALUE_FUNCTION { return getValue(); }
+  T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
+
+  template <typename U>
+  constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+    return hasValue() ? getValue() : std::forward<U>(value);
+  }
+
+  /// Apply a function to the value if present; otherwise return None.
+  template <class Function>
+  auto map(const Function &F) const LLVM_LVALUE_FUNCTION
+      -> Optional<decltype(F(getValue()))> {
+    if (*this) return F(getValue());
+    return None;
+  }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+  T &&getValue() && { return std::move(Storage.getValue()); }
+  T &&operator*() && { return std::move(Storage.getValue()); }
+
+  template <typename U>
+  T getValueOr(U &&value) && {
+    return hasValue() ? std::move(getValue()) : std::forward<U>(value);
+  }
+
+  /// Apply a function to the value if present; otherwise return None.
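+  /// A sketch of this rvalue form (hypothetical caller code):
+  /// \code
+  ///   Optional<std::string> Name = std::string("swift");
+  ///   Optional<size_t> Len =
+  ///       std::move(Name).map([](std::string S) { return S.size(); });
+  /// \endcode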
+  template <class Function>
+  auto map(const Function &F) &&
+      -> Optional<decltype(F(std::move(*this).getValue()))> {
+    if (*this) return F(std::move(*this).getValue());
+    return None;
+  }
+#endif
+};
+
+template <typename T, typename U>
+bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X == *Y;
+  return X.hasValue() == Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X == Y);
+}
+
+template <typename T, typename U>
+bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+  if (X && Y)
+    return *X < *Y;
+  return X.hasValue() < Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(Y < X);
+}
+
+template <typename T, typename U>
+bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+  return Y < X;
+}
+
+template <typename T, typename U>
+bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+  return !(X < Y);
+}
+
+template <typename T>
+bool operator==(const Optional<T> &X, NoneType) {
+  return !X;
+}
+
+template <typename T>
+bool operator==(NoneType, const Optional<T> &X) {
+  return X == None;
+}
+
+template <typename T>
+bool operator!=(const Optional<T> &X, NoneType) {
+  return !(X == None);
+}
+
+template <typename T>
+bool operator!=(NoneType, const Optional<T> &X) {
+  return X != None;
+}
+
+template <typename T> bool operator<(const Optional<T> &X, NoneType) {
+  return false;
+}
+
+template <typename T> bool operator<(NoneType, const Optional<T> &X) {
+  return X.hasValue();
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, NoneType) {
+  return !(None < X);
+}
+
+template <typename T> bool operator<=(NoneType, const Optional<T> &X) {
+  return !(X < None);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, NoneType) {
+  return None < X;
+}
+
+template <typename T> bool operator>(NoneType, const Optional<T> &X) {
+  return X < None;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, NoneType) {
+  return None <= X;
+}
+
+template <typename T> bool operator>=(NoneType, const Optional<T> &X) {
+  return X <= None;
+}
+
+template <typename T> bool operator==(const Optional<T> &X, const T &Y) {
+  return X && *X == Y;
+}
+
+template <typename T> bool operator==(const T &X, const Optional<T> &Y) {
+  return Y && X == *Y;
+}
+
+template <typename T> bool operator!=(const Optional<T> &X, const T &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator!=(const T &X, const Optional<T> &Y) {
+  return !(X == Y);
+}
+
+template <typename T> bool operator<(const Optional<T> &X, const T &Y) {
+  return !X || *X < Y;
+}
+
+template <typename T> bool operator<(const T &X, const Optional<T> &Y) {
+  return Y && X < *Y;
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, const T &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator<=(const T &X, const Optional<T> &Y) {
+  return !(Y < X);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, const T &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>(const T &X, const Optional<T> &Y) {
+  return Y < X;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, const T &Y) {
+  return !(X < Y);
+}
+
+template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
+  return !(X < Y);
+}
+
+raw_ostream &operator<<(raw_ostream &OS, NoneType);
+
+template <typename T, typename = decltype(std::declval<raw_ostream &>()
+                                          << std::declval<const T &>())>
+raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
+  if (O)
+    OS << *O;
+  else
+    OS << None;
+  return OS;
+}
+
+} // end namespace llvm
+}} // namespace swift::runtime
+
+#endif // LLVM_ADT_OPTIONAL_H
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerIntPair.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerIntPair.h
new file mode 100644
index 00000000..9d5c9de9
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerIntPair.h
@@ -0,0 +1,246 @@
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerIntPair class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and small
+/// integer.  It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer.  This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible.  For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else.  For example, this allows:
+///   PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+          typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+          typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+  // Used by MSVC visualizer and generally helpful for debugging/visualizing.
+  using InfoTy = Info;
+  intptr_t Value = 0;
+
+public:
+  constexpr PointerIntPair() = default;
+
+  PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+    setPointerAndInt(PtrVal, IntVal);
+  }
+
+  explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+  PointerTy getPointer() const { return Info::getPointer(Value); }
+
+  IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+  void setPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
+    Value = Info::updatePointer(Value, PtrVal);
+  }
+
+  void setInt(IntType IntVal) LLVM_LVALUE_FUNCTION {
+    Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
+  }
+
+  void initWithPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
+    Value = Info::updatePointer(0, PtrVal);
+  }
+
+  void setPointerAndInt(PointerTy PtrVal, IntType IntVal) LLVM_LVALUE_FUNCTION {
+    Value = Info::updateInt(Info::updatePointer(0, PtrVal),
+                            static_cast<intptr_t>(IntVal));
+  }
+
+  PointerTy const *getAddrOfPointer() const {
+    return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+  }
+
+  PointerTy *getAddrOfPointer() {
+    assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+           "Can only return the address if IntBits is cleared and "
+           "PtrTraits doesn't change the pointer");
+    return reinterpret_cast<PointerTy *>(&Value);
+  }
+
+  void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+
+  void setFromOpaqueValue(void *Val) LLVM_LVALUE_FUNCTION {
+    Value = reinterpret_cast<intptr_t>(Val);
+  }
+
+  static PointerIntPair getFromOpaqueValue(void *V) {
+    PointerIntPair P;
+    P.setFromOpaqueValue(V);
+    return P;
+  }
+
+  // Allow PointerIntPairs to be created from const void * if and only if the
+  // pointer type could be created from a const void *.
+  static PointerIntPair getFromOpaqueValue(const void *V) {
+    (void)PtrTraits::getFromVoidPointer(V);
+    return getFromOpaqueValue(const_cast<void *>(V));
+  }
+
+  bool operator==(const PointerIntPair &RHS) const {
+    return Value == RHS.Value;
+  }
+
+  bool operator!=(const PointerIntPair &RHS) const {
+    return Value != RHS.Value;
+  }
+
+  bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+  bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+
+  bool operator<=(const PointerIntPair &RHS) const {
+    return Value <= RHS.Value;
+  }
+
+  bool operator>=(const PointerIntPair &RHS) const {
+    return Value >= RHS.Value;
+  }
+};
+
+// Specialize is_trivially_copyable to avoid limitation of llvm::is_trivially_copyable
+// when compiled with gcc 4.9.
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType>> : std::true_type {
+#ifdef HAVE_STD_IS_TRIVIALLY_COPYABLE
+  static_assert(std::is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType>>::value,
+                "inconsistent behavior between llvm:: and std:: implementation of is_trivially_copyable");
+#endif
+};
+
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+  static_assert(PtrTraits::NumLowBitsAvailable <
+                    std::numeric_limits<uintptr_t>::digits,
+                "cannot use a pointer type that has all bits free");
+  static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+                "PointerIntPair with integer size too large for pointer");
+  enum MaskAndShiftConstants : uintptr_t {
+    /// PointerBitMask - The bits that come from the pointer.
+    PointerBitMask =
+        ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+    /// IntShift - The number of low bits that we reserve for other uses, and
+    /// keep zero.
+    IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+    /// IntMask - This is the unshifted mask for valid bits of the int type.
+    IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+    // ShiftedIntMask - This is the bits for the integer shifted in place.
+    ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+  };
+
+  static PointerT getPointer(intptr_t Value) {
+    return PtrTraits::getFromVoidPointer(
+        reinterpret_cast<void *>(Value & PointerBitMask));
+  }
+
+  static intptr_t getInt(intptr_t Value) {
+    return (Value >> IntShift) & IntMask;
+  }
+
+  static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+    intptr_t PtrWord =
+        reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+    assert((PtrWord & ~PointerBitMask) == 0 &&
+           "Pointer is not sufficiently aligned");
+    // Preserve all low bits, just update the pointer.
+    return PtrWord | (OrigValue & ~PointerBitMask);
+  }
+
+  static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+    intptr_t IntWord = static_cast<intptr_t>(Int);
+    assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+    // Preserve all bits other than the ones we are updating.
+    return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+  }
+};
+
+// Provide specialization of DenseMapInfo for PointerIntPair.
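+//
+// With this specialization a PointerIntPair can be used directly as a
+// DenseMap or DenseSet key. A sketch (Node and N are hypothetical; the
+// pointee type must be sufficiently aligned to spare one low bit):
+//
+// \code
+//   DenseSet<PointerIntPair<Node *, 1, bool>> Visited;
+//   Visited.insert(PointerIntPair<Node *, 1, bool>(N, true));
+// \endcode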
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>> {
+  using Ty = PointerIntPair<PointerTy, IntBits, IntType>;
+
+  static Ty getEmptyKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-1);
+    Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static Ty getTombstoneKey() {
+    uintptr_t Val = static_cast<uintptr_t>(-2);
+    Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
+    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+  }
+
+  static unsigned getHashValue(Ty V) {
+    uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
+    return unsigned(IV) ^ unsigned(IV >> 9);
+  }
+
+  static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
+};
+
+// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
+template <typename PointerTy, unsigned IntBits, typename IntType,
+          typename PtrTraits>
+struct PointerLikeTypeTraits<
+    PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
+  static inline void *
+  getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  static inline PointerIntPair<PointerTy, IntBits, IntType>
+  getFromVoidPointer(const void *P) {
+    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+  }
+
+  static constexpr int NumLowBitsAvailable =
+      PtrTraits::NumLowBitsAvailable - IntBits;
+};
+
+} // end namespace llvm
+}} // namespace swift::runtime
+
+#endif // LLVM_ADT_POINTERINTPAIR_H
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerUnion.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerUnion.h
new file mode 100644
index 00000000..1aeeaba3
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/PointerUnion.h
@@ -0,0 +1,301 @@
+//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerUnion class, which is a discriminated union of
+// pointer types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERUNION_H
+#define LLVM_ADT_POINTERUNION_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+template <typename T> struct PointerUnionTypeSelectorReturn {
+  using Return = T;
+};
+
+/// Get a type based on whether two types are the same or not.
+///
+/// For:
+///
+/// \code
+///   using Ret = typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return;
+/// \endcode
+///
+/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_NE>::Return;
+};
+
+template <typename T, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
+  using Return = typename PointerUnionTypeSelectorReturn<RET_EQ>::Return;
+};
+
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelectorReturn<
+    PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
+  using Return =
+      typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
+};
+
+namespace pointer_union_detail {
+  /// Determine the number of bits required to store integers with values < n.
+  /// This is ceil(log2(n)).
+  constexpr int bitsRequired(unsigned n) {
+    return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0;
+  }
+
+  template <typename... Ts> constexpr int lowBitsAvailable() {
+    return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
+  }
+
+  /// Find the index of a type in a list of types. TypeIndex<T, Us...>::Index
+  /// is the index of T in Us, or sizeof...(Us) if T does not appear in the
+  /// list.
+  template <typename T, typename... Us> struct TypeIndex;
+  template <typename T, typename... Us> struct TypeIndex<T, T, Us...> {
+    static constexpr int Index = 0;
+  };
+  template <typename T, typename U, typename... Us>
+  struct TypeIndex<T, U, Us...> {
+    static constexpr int Index = 1 + TypeIndex<T, Us...>::Index;
+  };
+  template <typename T> struct TypeIndex<T> {
+    static constexpr int Index = 0;
+  };
+
+  /// Find the first type in a list of types.
+  template <typename T, typename...> struct GetFirstType {
+    using type = T;
+  };
+
+  /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
+  /// for the template arguments.
+  template <typename ...PTs> class PointerUnionUIntTraits {
+  public:
+    static inline void *getAsVoidPointer(void *P) { return P; }
+    static inline void *getFromVoidPointer(void *P) { return P; }
+    static constexpr int NumLowBitsAvailable = lowBitsAvailable<PTs...>();
+  };
+
+  /// Implement assignment in terms of construction.
+  template <typename Derived, typename T> struct AssignableFrom {
+    Derived &operator=(T t) {
+      return static_cast<Derived &>(*this) = Derived(t);
+    }
+  };
+
+  template <typename Derived, typename ValTy, int I, typename ...Types>
+  class PointerUnionMembers;
+
+  template <typename Derived, typename ValTy, int I>
+  class PointerUnionMembers<Derived, ValTy, I> {
+  protected:
+    ValTy Val;
+    PointerUnionMembers() = default;
+    PointerUnionMembers(ValTy Val) : Val(Val) {}
+
+    friend struct PointerLikeTypeTraits<Derived>;
+  };
+
+  template <typename Derived, typename ValTy, int I, typename Type,
+            typename ...Types>
+  class PointerUnionMembers<Derived, ValTy, I, Type, Types...>
+      : public PointerUnionMembers<Derived, ValTy, I + 1, Types...> {
+    using Base = PointerUnionMembers<Derived, ValTy, I + 1, Types...>;
+  public:
+    using Base::Base;
+    PointerUnionMembers() = default;
+    PointerUnionMembers(Type V)
+        : Base(ValTy(const_cast<void *>(
+                         PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
+                     I)) {}
+
+    using Base::operator=;
+    Derived &operator=(Type V) {
+      this->Val = ValTy(
+          const_cast<void *>(PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
+          I);
+      return static_cast<Derived &>(*this);
+    };
+  };
+}
+
+/// A discriminated union of two or more pointer types, with the discriminator
+/// in the low bit of the pointer.
+///
+/// This implementation is extremely efficient in space due to leveraging the
+/// low bits of the pointer, while exposing a natural and type-safe API.
+///
+/// Common use patterns would be something like this:
+///    PointerUnion<int*, float*> P;
+///    P = (int*)0;
+///    printf("%d %d", P.is<int*>(), P.is<float*>());  // prints "1 0"
+///    X = P.get<int*>();     // ok.
+///    Y = P.get<float*>();   // runtime assertion failure.
+///    Z = P.get<double*>();  // compile time failure.
+///    P = (float*)0;
+///    Y = P.get<float*>();   // ok.
+///    X = P.get<int*>();     // runtime assertion failure.
+template <typename... PTs>
+class PointerUnion
+    : public pointer_union_detail::PointerUnionMembers<
+          PointerUnion<PTs...>,
+          PointerIntPair<
+              void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
+              pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
+          0, PTs...> {
+  // The first type is special because we want to directly cast a pointer to a
+  // default-initialized union to a pointer to the first type. But we don't
+  // want PointerUnion to be a 'template <typename First, typename ...Rest>'
+  // because it's much more convenient to have a name for the whole pack. So
+  // split off the first type here.
+  using First = typename pointer_union_detail::GetFirstType<PTs...>::type;
+  using Base = typename PointerUnion::PointerUnionMembers;
+
+public:
+  PointerUnion() = default;
+
+  PointerUnion(std::nullptr_t) : PointerUnion() {}
+  using Base::Base;
+
+  /// Test if the pointer held in the union is null, regardless of
+  /// which type it is.
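+  /// \code
+  ///   PointerUnion<int *, float *> P;  // default-constructed: holds nothing
+  ///   assert(P.isNull());
+  /// \endcode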
+  bool isNull() const { return !this->Val.getPointer(); }
+
+  explicit operator bool() const { return !isNull(); }
+
+  /// Test if the Union currently holds the type matching T.
+  template <typename T> bool is() const {
+    constexpr int Index = pointer_union_detail::TypeIndex<T, PTs...>::Index;
+    static_assert(Index < sizeof...(PTs),
+                  "PointerUnion::is<T> given type not in the union");
+    return this->Val.getInt() == Index;
+  }
+
+  /// Returns the value of the specified pointer type.
+  ///
+  /// If the specified pointer type is incorrect, assert.
+  template <typename T> T get() const {
+    assert(is<T>() && "Invalid accessor called");
+    return PointerLikeTypeTraits<T>::getFromVoidPointer(this->Val.getPointer());
+  }
+
+  /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+  template <typename T> T dyn_cast() const {
+    if (is<T>())
+      return get<T>();
+    return T();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  First const *getAddrOfPtr1() const {
+    return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+  }
+
+  /// If the union is set to the first pointer type get an address pointing to
+  /// it.
+  First *getAddrOfPtr1() {
+    assert(is<First>() && "Val is not the first pointer");
+    assert(
+        PointerLikeTypeTraits<First>::getAsVoidPointer(get<First>()) ==
+            this->Val.getPointer() &&
+        "Can't get the address because PointerLikeTypeTraits changes the ptr");
+    return const_cast<First *>(
+        reinterpret_cast<const First *>(this->Val.getAddrOfPointer()));
+  }
+
+  /// Assignment from nullptr which just clears the union.
+  const PointerUnion &operator=(std::nullptr_t) {
+    this->Val.initWithPointer(nullptr);
+    return *this;
+  }
+
+  /// Assignment from elements of the union.
+  using Base::operator=;
+
+  void *getOpaqueValue() const { return this->Val.getOpaqueValue(); }
+  static inline PointerUnion getFromOpaqueValue(void *VP) {
+    PointerUnion V;
+    V.Val = decltype(V.Val)::getFromOpaqueValue(VP);
+    return V;
+  }
+};
+
+template <typename ...PTs>
+bool operator==(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+  return lhs.getOpaqueValue() == rhs.getOpaqueValue();
+}
+
+template <typename ...PTs>
+bool operator!=(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+  return lhs.getOpaqueValue() != rhs.getOpaqueValue();
+}
+
+template <typename ...PTs>
+bool operator<(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits)-1.
+template <typename ...PTs>
+struct PointerLikeTypeTraits<PointerUnion<PTs...>> {
+  static inline void *getAsVoidPointer(const PointerUnion<PTs...> &P) {
+    return P.getOpaqueValue();
+  }
+
+  static inline PointerUnion<PTs...> getFromVoidPointer(void *P) {
+    return PointerUnion<PTs...>::getFromOpaqueValue(P);
+  }
+
+  // The number of bits available are the min of the pointer types minus the
+  // bits needed for the discriminator.
+  static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<decltype(
+      PointerUnion<PTs...>::Val)>::NumLowBitsAvailable;
+};
+
+// Teach DenseMap how to use PointerUnions as keys.
+template <typename ...PTs> struct DenseMapInfo<PointerUnion<PTs...>> {
+  using Union = PointerUnion<PTs...>;
+  using FirstInfo =
+      DenseMapInfo<typename pointer_union_detail::GetFirstType<PTs...>::type>;
+
+  static inline Union getEmptyKey() { return Union(FirstInfo::getEmptyKey()); }
+
+  static inline Union getTombstoneKey() {
+    return Union(FirstInfo::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const Union &UnionVal) {
+    intptr_t key = (intptr_t)UnionVal.getOpaqueValue();
+    return DenseMapInfo<intptr_t>::getHashValue(key);
+  }
+
+  static bool isEqual(const Union &LHS, const Union &RHS) {
+    return LHS == RHS;
+  }
+};
+
+} // end namespace llvm
+}} // swift::runtime
+
+#endif // LLVM_ADT_POINTERUNION_H
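(Illustrative aside, not part of the vendored file: a companion sketch of the
PointerUnion API defined above, mirroring the class doc comment; all names are
hypothetical.)

    #include "llvm/ADT/PointerUnion.h"

    void pointerUnionSketch(int *IP, float *FP) {
      llvm::PointerUnion<int *, float *> P = IP; // discriminator selects int*
      bool HoldsInt = P.is<int *>();    // true
      int *Same = P.get<int *>();       // asserts the tag, then returns IP
      P = FP;                           // re-tags the union as float*
      float *F = P.dyn_cast<float *>(); // FP
      int *None = P.dyn_cast<int *>();  // null: the union now holds a float*
      (void)HoldsInt; (void)Same; (void)F; (void)None;
    }

(Because the discriminator occupies bitsRequired(sizeof...(PTs)) low bits of a
single word, a two-type union is exactly one pointer wide; the trade-off is
that every member type must be aligned enough to leave those bits free.)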
diff --git a/Sources/_CJavaScriptEventLoop/include/llvm/ADT/STLExtras.h b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/STLExtras.h
new file mode 100644
index 00000000..8beab3f8
--- /dev/null
+++ b/Sources/_CJavaScriptEventLoop/include/llvm/ADT/STLExtras.h
@@ -0,0 +1,1945 @@
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLEXTRAS_H
+#define LLVM_ADT_STLEXTRAS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#ifdef EXPENSIVE_CHECKS
+#include <random> // for std::mt19937
+#endif
+
+inline namespace __swift { inline namespace __runtime {
+namespace llvm {
+
+// Only used by compiler if both template types are the same.  Useful when
+// using SFINAE to test for the existence of member functions.
+template <typename T, T> struct SameType;
+
+namespace detail {
+
+template <typename RangeT>
+using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
+
+template <typename RangeT>
+using ValueOfRange = typename std::remove_reference<decltype(
+    *std::begin(std::declval<RangeT &>()))>::type;
+
+} // end namespace detail
+
+//===----------------------------------------------------------------------===//
+//     Extra additions to <type_traits>
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct negation : std::integral_constant<bool, !bool(T::value)> {};
+
+template <typename...> struct conjunction : std::true_type {};
+template <typename B1> struct conjunction<B1> : B1 {};
+template <typename B1, typename... Bn>
+struct conjunction<B1, Bn...>
+    : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
+
+template <typename T> struct make_const_ptr {
+  using type =
+      typename std::add_pointer<typename std::add_const<T>::type>::type;
+};
+
+template <typename T> struct make_const_ref {
+  using type = typename std::add_lvalue_reference<
+      typename std::add_const<T>::type>::type;
+};
+
+/// Utilities for detecting if a given trait holds for some set of arguments
+/// 'Args'. For example, the given trait could be used to detect if a given type
+/// has a copy assignment operator:
+///   template<class T>
+///   using has_copy_assign_t = decltype(std::declval<T&>()
+///                                                 = std::declval<const T&>());
+///   bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
+namespace detail {
+template <typename... Ts> using void_t = void;
+template <class, template <class...> class Op, class... Args> struct detector {
+  using value_t = std::false_type;
+};
+template