From 7867d2d97a9246115dd7cdb357e2a1c645f11b18 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sat, 15 Aug 2020 18:26:55 +0200 Subject: [PATCH 01/23] Create markdownlint.yml --- .github/workflows/markdownlint.yml | 32 ++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/workflows/markdownlint.yml diff --git a/.github/workflows/markdownlint.yml b/.github/workflows/markdownlint.yml new file mode 100644 index 0000000000000..8e7a9afb70594 --- /dev/null +++ b/.github/workflows/markdownlint.yml @@ -0,0 +1,32 @@ +name: Markdownlint + +on: + push: + paths: + - "**/*.md" + - ".markdownlint.json" + - ".github/workflows/markdownlint.yml" + - ".github/workflows/markdownlint-problem-matcher.json" + pull_request: + paths: + - "**/*.md" + - ".markdownlint.json" + - ".github/workflows/markdownlint.yml" + - ".github/workflows/markdownlint-problem-matcher.json" + +jobs: + lint: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Use Node.js + uses: actions/setup-node@v1 + with: + node-version: 12.x + - name: Run Markdownlint + run: | + echo "::add-matcher::.github/workflows/markdownlint-problem-matcher.json" + npm i -g markdownlint-cli + markdownlint "**/*.md" From 90d04115fca553dadb375398831e5a0d13d6d43b Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sat, 15 Aug 2020 18:28:13 +0200 Subject: [PATCH 02/23] Create markdownlint-problem-matcher.json --- .../workflows/markdownlint-problem-matcher.json | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/markdownlint-problem-matcher.json diff --git a/.github/workflows/markdownlint-problem-matcher.json b/.github/workflows/markdownlint-problem-matcher.json new file mode 100644 index 0000000000000..f0741f6b90626 --- /dev/null +++ b/.github/workflows/markdownlint-problem-matcher.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "markdownlint", + "pattern": [ + { + "regexp": "^([^:]*):(\\d+):?(\\d+)?\\s([\\w-\\/]*)\\s(.*)$", + "file": 1, + "line": 2, + "column": 3, + "code": 4, + "message": 5 + } + ] + } + ] +} From ed2141d6d6e66fdf3654df0d970c57889f3fdcae Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sat, 15 Aug 2020 18:36:44 +0200 Subject: [PATCH 03/23] Create .markdownlint.json --- .markdownlint.json | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .markdownlint.json diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 0000000000000..df973e1209da7 --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,4 @@ +{ + "default": false, + "MD009": true, +} From 72d9853fa085626c64e1654db74e8d8257bf8c13 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sat, 15 Aug 2020 18:41:26 +0200 Subject: [PATCH 04/23] Update .markdownlint.json --- .markdownlint.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.markdownlint.json b/.markdownlint.json index df973e1209da7..3cff3f10aba03 100644 --- a/.markdownlint.json +++ b/.markdownlint.json @@ -1,4 +1,6 @@ { "default": false, - "MD009": true, + "MD009": { + "br_spaces": 0 + }, } From a4bd2407bcd2714610d95d23641fecf6389833a8 Mon Sep 17 00:00:00 2001 From: Youssef Victor Date: Sat, 15 Aug 2020 20:29:13 +0200 Subject: [PATCH 05/23] Trim trailing whitespaces --- SECURITY.md | 4 +- .../api-guidelines/System.Memory.md | 2 +- .../api-guidelines/nullability.md | 2 +- 
.../breaking-change-definitions.md | 10 +- .../breaking-change-rules.md | 8 +- docs/coding-guidelines/breaking-changes.md | 2 +- docs/coding-guidelines/interop-guidelines.md | 10 +- docs/design/coreclr/botr/corelib.md | 2 +- docs/design/coreclr/botr/method-descriptor.md | 2 +- docs/design/coreclr/botr/readytorun-format.md | 6 +- docs/design/coreclr/botr/shared-generics.md | 2 +- docs/design/coreclr/botr/type-system.md | 2 +- .../coreclr/botr/vectors-and-intrinsics.md | 22 +-- .../coreclr/botr/xplat-minidump-generation.md | 2 +- docs/design/coreclr/jit/jit-call-morphing.md | 4 +- docs/design/coreclr/jit/lsra-detail.md | 12 +- docs/design/coreclr/jit/ryujit-tutorial.md | 2 +- .../profiling/Profiler Attach on CoreCLR.md | 2 +- .../profiling/Profiler Breaking Changes.md | 2 +- .../profiling/davbr-blog-archive/Attach.md | 48 +++--- .../profiling/davbr-blog-archive/Attach2.md | 112 ++++++------- .../CORPROF_E_UNSUPPORTED_CALL_SEQUENCE.md | 34 ++-- .../Debugging - Activation.md | 2 +- .../Debugging - SOS and IDs.md | 14 +- ...ckSnapshot - Callback CONTEXT Registers.md | 6 +- .../DoStackSnapshot - Exception Filters.md | 64 ++++---- .../DoStackSnapshot - HRESULTs.md | 22 +-- .../ELT Hooks - The Basics.md | 10 +- .../ELT Hooks - tail calls.md | 38 ++--- .../Generics and Your Profiler.md | 26 +-- ... Tokens, Run-Time IDs, and Type Loading.md | 42 ++--- .../davbr-blog-archive/Profiler Detach.md | 52 +++--- ...rofiler stack walking Basics and beyond.md | 154 +++++++++--------- .../davbr-blog-archive/ReJIT - The Basics.md | 64 ++++---- ...Signature Blob Parser for your Profiler.md | 46 +++--- .../Tail call JIT conditions.md | 32 ++-- .../davbr-blog-archive/Type Forwarding.md | 50 +++--- .../features/DotNetCore-SharedPackageStore.md | 6 +- docs/design/features/IJW-activation.md | 2 +- .../features/Linux-Hugepage-Crossgen2.md | 6 +- docs/design/features/OnStackReplacement.md | 20 +-- docs/design/features/StringDeduplication.md | 54 +++--- docs/design/features/additional-deps.md | 8 +- ...de-versioning-profiler-breaking-changes.md | 2 +- .../features/covariant-return-methods.md | 8 +- ...gen2-compilation-structure-enhancements.md | 4 +- docs/design/features/event-counter.md | 2 +- .../features/framework-version-resolution.md | 20 +-- .../features/globalization-invariant-mode.md | 56 +++---- .../host-component-dependencies-resolution.md | 2 +- docs/design/features/host-error-codes.md | 18 +- docs/design/features/host-probing.md | 22 +-- docs/design/features/host-startup-hook.md | 12 +- docs/design/features/host-tracing.md | 12 +- docs/design/features/raw-eventlistener.md | 8 +- .../readytorun-composite-format-design.md | 6 +- .../roll-forward-on-no-candidate-fx.md | 26 +-- .../features/source-generator-pinvokes.md | 2 +- docs/design/features/tiered-compilation.md | 2 +- docs/design/features/unloadability.md | 4 +- docs/design/specs/Ecma-335-Augments.md | 6 +- docs/design/specs/PE-COFF.md | 16 +- docs/design/specs/PortablePdb-Metadata.md | 26 +-- docs/issues-pr-management.md | 2 +- docs/project/dogfooding.md | 2 +- docs/project/linux-performance-tracing.md | 2 +- docs/project/strong-name-signing.md | 4 +- docs/project/versioning.md | 2 +- docs/project/windows-performance-tracing.md | 2 +- docs/project/writing-tests.md | 2 +- docs/workflow/README.md | 4 +- docs/workflow/building/libraries/README.md | 2 +- .../libraries/freebsd-instructions.md | 44 ++--- .../libraries/webassembly-instructions.md | 10 +- docs/workflow/building/mono/README.md | 8 +- docs/workflow/ci/coreclr-ci-health.md | 6 +- 
.../debugging/libraries/debugging-packages.md | 8 +- .../libraries/windows-instructions.md | 14 +- ...unning-aspnet-benchmarks-with-crossgen2.md | 12 +- .../testing/libraries/testing-android.md | 2 +- .../testing/libraries/testing-wasm.md | 6 +- docs/workflow/testing/mono/testing.md | 10 +- docs/workflow/testing/visualstudio.md | 8 +- eng/docker/Readme.md | 2 +- eng/pipelines/coreclr/readme.md | 2 +- .../src/dlls/mscoree/coreclr/README.md | 2 +- src/coreclr/src/inc/readme.md | 2 +- src/coreclr/src/pal/src/libunwind/README.md | 2 +- .../src/tools/ILVerification/README.md | 2 +- src/coreclr/src/tools/dotnet-pgo/README.md | 2 +- .../tools/dotnet-pgo/dotnet-pgo-experiment.md | 26 +-- .../managed/Microsoft.NET.HostModel/README.md | 6 +- .../tests/scripts/linux-test/README.md | 36 ++-- .../Common/src/System/Net/Internals/readme.md | 2 +- .../Net/EnterpriseTests/setup/README.md | 2 +- .../tests/System/Net/Prerequisites/README.md | 4 +- src/libraries/Microsoft.CSharp/README.md | 2 +- .../README.md | 2 +- .../Microsoft.VisualBasic.Core/README.md | 2 +- .../mef_guide/README.md | 2 +- .../tests/ManualTests/Readme.md | 2 +- .../src/DatabaseSetupInstructions.md | 6 +- .../src/ActivityUserGuide.md | 42 ++--- .../src/DiagnosticSourceUsersGuide.md | 26 +-- .../src/FlatRequestId.md | 12 +- .../src/HierarchicalRequestId.md | 24 +-- .../src/HttpCorrelationProtocol.md | 18 +- .../documentation/EventCounterTutorial.md | 12 +- .../System.Dynamic.Runtime/README.md | 2 +- .../System.Linq.Expressions/README.md | 2 +- .../src/HttpDiagnosticsGuide.md | 18 +- .../tests/StressTests/HttpStress/Readme.md | 4 +- .../tests/StressTests/SslStress/Readme.md | 4 +- .../docs/ParameterizedCtorSpec.md | 14 +- .../docs/ReferenceHandling_spec.md | 102 ++++++------ .../docs/SerializerProgrammingModel.md | 2 +- .../docs/writable_json_dom_spec.md | 34 ++-- .../System.Text.Json/roadmap/README.md | 4 +- .../System.Text.Json/source_package/README.md | 2 +- .../System.Utf8String.Experimental/README.md | 2 +- src/mono/mono/mini/cpu-amd64.md | 20 +-- src/mono/mono/mini/cpu-arm.md | 4 +- src/mono/mono/mini/cpu-mips.md | 6 +- src/mono/mono/mini/cpu-ppc.md | 6 +- src/mono/mono/mini/cpu-ppc64.md | 2 +- src/mono/mono/mini/cpu-s390x.md | 14 +- src/mono/mono/mini/cpu-sparc.md | 4 +- src/mono/mono/mini/cpu-x86.md | 26 +-- .../tests/metadata-verifier/cli-blob-tests.md | 22 +-- .../metadata-verifier/cli-cattr-tests.md | 2 +- .../cli-global-props-tests.md | 2 +- .../metadata-verifier/cli-metadata-tests.md | 4 +- .../metadata-verifier/cli-tables-tests.md | 94 +++++------ .../metadata-verifier/data-directory-tests.md | 40 ++--- .../tests/metadata-verifier/header-tests.md | 14 +- .../metadata-verifier/resources-tests.md | 2 +- .../metadata-verifier/section-table-tests.md | 8 +- src/tests/Interop/ReadMe.md | 2 +- .../JitBench/unofficial_dotnet/README.md | 8 +- src/tests/profiler/native/README.md | 2 +- tools-local/dotnet-deb-tool/README.md | 10 +- 141 files changed, 1068 insertions(+), 1068 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 29863ccaa542f..8312d20e45376 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -6,8 +6,8 @@ The .NET Core and ASP.NET Core support policy, including supported versions can ## Reporting a Vulnerability -Security issues and bugs should be reported privately to the Microsoft Security Response Center (MSRC), either by emailing secure@microsoft.com or via the portal at https://msrc.microsoft.com. -You should receive a response within 24 hours. 
If for some reason you do not, please follow up via email to ensure we received your
original message. Further information, including the MSRC PGP key, can be found in the [MSRC Report an Issue FAQ](https://www.microsoft.com/en-us/msrc/faqs-report-an-issue).

Reports via MSRC may qualify for the .NET Core Bug Bounty. Details of the .NET Core Bug Bounty including terms and conditions are at [https://aka.ms/corebounty](https://aka.ms/corebounty).
diff --git a/docs/coding-guidelines/api-guidelines/System.Memory.md b/docs/coding-guidelines/api-guidelines/System.Memory.md
index 894b8ca77d9eb..6df28c18ca5d2 100644
--- a/docs/coding-guidelines/api-guidelines/System.Memory.md
+++ b/docs/coding-guidelines/api-guidelines/System.Memory.md
@@ -51,4 +51,4 @@ specs can be found here:
   can be implicitly converted to `ReadOnlySpan<char>`.
* **AVOID** providing overloads for both `ReadOnlySpan<T>`/`Span<T>` as well as
  pointers and arrays as those can be implicitly converted to
-  `ReadOnlySpan<T>`/`Span<T>`.
\ No newline at end of file
+  `ReadOnlySpan<T>`/`Span<T>`.
diff --git a/docs/coding-guidelines/api-guidelines/nullability.md b/docs/coding-guidelines/api-guidelines/nullability.md
index 62e0fe3958884..9cb1b96525c2f 100644
--- a/docs/coding-guidelines/api-guidelines/nullability.md
+++ b/docs/coding-guidelines/api-guidelines/nullability.md
@@ -114,7 +114,7 @@ A code review for enabling nullability generally involves three passes:
    - Adding `!` to reference type usage. These essentially suppress the null warning, telling the compiler to treat the expression as if it's non-null. These evaporate at compile-time.
    - Adding `Debug.Assert(reference != null);` statements. These inform the compiler that the mentioned reference is non-`null`, which will cause the compiler to factor that in and have the effect of suppressing subsequent warnings on that reference (until the flow analysis suggests that could change). As with any `Debug.Assert`, these evaporate at compile-time in release builds (where `DEBUG` isn't defined).
-
+
  - Most any other changes have the potential to change the IL, which should not be necessary for the feature. In particular, it's common for `?`s on dereferences to sneak in, e.g. changing `someVar.SomeMethod()` to `someVar?.SomeMethod()`; that is a change to the IL, and should only be employed when there's an actual known bug that's important to fix, as otherwise we're incurring unnecessary cost. Similarly, it's easy to accidentally add `?` to value types, which has a significant impact, changing the `T` to a `Nullable<T>` and should be avoided.

  - Any `!`s added that should have been unnecessary and are required due to either a compiler issue or due to lack of expressibility about annotations should have a `// TODO-NULLABLE: http://link/to/relevant/issue` comment added on the same line.
diff --git a/docs/coding-guidelines/breaking-change-definitions.md b/docs/coding-guidelines/breaking-change-definitions.md
index 6eb6f958fc7e0..dbcbfd6c6cde2 100644
--- a/docs/coding-guidelines/breaking-change-definitions.md
+++ b/docs/coding-guidelines/breaking-change-definitions.md
@@ -6,7 +6,7 @@ Behavioral Change

A behavioral change represents changes to the behavior of a member.
A behavioral change may include throwing a new exception, adding or removing internal method calls, or altering the way in which a return value is calculated. Behavioral changes can be the hardest type of change to categorize as acceptable or not - they can be severe in impact, or relatively innocuous.

-Binary Compatibility
+Binary Compatibility
--------------------

Refers to the ability of existing consumers of an API to be able to use a newer version without recompilation. By definition, if an assembly's public signatures have been removed, or altered so that consumers can no longer access the same interface exposed by the assembly, the change is said to be a _binary incompatible change_.

@@ -16,19 +16,19 @@ Source Compatibility
--------------------

Refers to the ability of existing consumers of an API to recompile against a newer version without any source changes. By definition, if a consumer needs to make changes to its code in order for it to build successfully against a newer version of an API, the change is said to be a _source incompatible change_.

-Design-Time Compatibility
+Design-Time Compatibility
-------------------------

_Design-time compatibility_ refers to preserving the design-time experience across versions of Visual Studio and other design-time environments. This can involve details around the UI of the designer, but by far the most interesting design-time compatibility is project compatibility. A potential project (or solution) must be able to be opened, and used on a newer version of a designer.

-Backwards Compatibility
+Backwards Compatibility
-----------------------

-_Backwards compatibility_ refers to the ability of an existing consumer of an API to run against, and behave in the same way against a newer version. By definition, if a consumer is not able to run, or behaves differently against the newer version of the API, then the API is said to be _backwards incompatible_.
+_Backwards compatibility_ refers to the ability of an existing consumer of an API to run against, and behave in the same way against a newer version. By definition, if a consumer is not able to run, or behaves differently against the newer version of the API, then the API is said to be _backwards incompatible_.

Changes that affect backwards compatibility are strongly discouraged. All alternates should be actively considered, since developers will, by default, expect backwards compatibility in newer versions of an API.

-Forwards Compatibility
+Forwards Compatibility
----------------------

_Forwards compatibility_ is the exact reverse of backwards compatibility; it refers to the ability of an existing consumer of an API to run against, and behave in the same way against an _older_ version. By definition, if a consumer is not able to run, or behaves differently against an older version of the API, then the API is said to be _forwards incompatible_.
diff --git a/docs/coding-guidelines/breaking-change-rules.md b/docs/coding-guidelines/breaking-change-rules.md
index 6fff08837e168..ab230f9d279a0 100644
--- a/docs/coding-guidelines/breaking-change-rules.md
+++ b/docs/coding-guidelines/breaking-change-rules.md
@@ -19,14 +19,14 @@ Breaking Change Rules
### Property, Field, Parameter and Return Values
✓ **Allowed**
* Increasing the range of accepted values for a property or parameter if the member _is not_ `virtual`
-
+
  Note that the range can only increase to the extent that it does not impact the static type. e.g.
it is OK to remove `if (x > 10) throw new ArgumentOutOfRangeException("x")`, but it is not OK to change the type of `x` from `int` to `long` or `int?`. * Returning a value of a more derived type for a property, field, return or `out` value Note, again, that the static type cannot change. e.g. it is OK to return a `string` instance where an `object` was returned previously, but it is not OK to change the return type from `object` to `string`. -✗ **Disallowed** +✗ **Disallowed** * Increasing the range of accepted values for a property or parameter if the member _is_ `virtual` This is breaking because any existing overridden members will now not function correctly for the extended range of values. @@ -135,7 +135,7 @@ Breaking Change Rules So long as it does not introduce any new abstract members or change the semantics or behavior of existing members, a type can be introduced into a hierarchy between two existing types. For example, between .NET Framework 1.1 and .NET Framework 2.0, we introduced `DbConnection` as a new base class for `SqlConnection` which previously derived from `Component`. * Adding an interface implementation to a type - + This is acceptable because it will not adversely affect existing clients. Any changes which could be made to the type being changed in this situation, will have to work within the boundaries of acceptable changes defined here, in order for the new implementation to remain acceptable. Extreme caution is urged when adding interfaces that directly affect the ability of the designer or serializer to generate code or data, that cannot be consumed down-level. An example is the `ISerializable` interface. Care should be taken when the interface (or one of the interfaces that this interface requires) has default interface implementations for other interface methods. The default implementation could conflict with other default implementations in a derived class. @@ -205,7 +205,7 @@ Breaking Change Rules * Adding an overload that precludes an existing overload, and defines different behavior - This will break existing clients that were bound to the previous overload. For example, if you have a class that has a single version of a method that accepts a `uint`, an existing consumer will + This will break existing clients that were bound to the previous overload. For example, if you have a class that has a single version of a method that accepts a `uint`, an existing consumer will successfully bind to that overload, if simply passing an `int` value. However, if you add an overload that accepts an `int`, recompiling or via late-binding the application will now bind to the new overload. If different behavior results, then this is a breaking change. * Moving an exposed field onto a class higher in the hierarchy tree of the type from which it was removed diff --git a/docs/coding-guidelines/breaking-changes.md b/docs/coding-guidelines/breaking-changes.md index 5e4798cf7d1b9..78b787cb70b80 100644 --- a/docs/coding-guidelines/breaking-changes.md +++ b/docs/coding-guidelines/breaking-changes.md @@ -97,7 +97,7 @@ more latitude here in .NET Core. For buckets #2 and #3 we apply a risk-benefit analysis. It doesn't matter if the old behavior is "wrong", we still need to think through the implications. This -can result in one of the following outcomes: +can result in one of the following outcomes: * **Accepted with compat switch**. 
Depending on the estimated customer impact, we may decide to add a compat switch that allows consumers to bring back the diff --git a/docs/coding-guidelines/interop-guidelines.md b/docs/coding-guidelines/interop-guidelines.md index e6e3a1d32060d..7dff0711a8d3e 100644 --- a/docs/coding-guidelines/interop-guidelines.md +++ b/docs/coding-guidelines/interop-guidelines.md @@ -58,7 +58,7 @@ internal static partial class Interop ``` As shown above, platforms may be additive, in that an assembly may use functionality from multiple folders, e.g. System.IO.FileSystem's Linux build will use functionality both from Unix (common across all Unix systems) and from Linux (specific to Linux and not available across non-Linux Unix systems). -   + - Interop.*.cs files are created in a way such that every assembly consuming the file will need every DllImport it contains. - If multiple related DllImports will all be needed by every consumer, they may be declared in the same file, named for the functionality grouping, e.g. Interop.IOErrors.cs. - Otherwise, in the limit (and the expected case for most situations) each Interop.*.cs file will contain a single DllImport and associated interop types (e.g. the structs used with that signature) and helper wrappers, e.g. Interop.strerror.cs. @@ -104,7 +104,7 @@ internal static partial class Interop // contents of Common\src\Interop\Windows\ ``` (Note that this will likely result in some extra constants defined in each assembly that uses interop, which minimally violates one of the goals, but it's very minimal.) -   + - .csproj project files then include the interop code they need, e.g. ```XML @@ -170,10 +170,10 @@ To address this, we're moving to a model where all UNIX interop from dotnet/runt Guidelines for shim C++ API: -- Keep them as "thin"/1:1 as possible. - - We want to write the majority of code in C#. +- Keep them as "thin"/1:1 as possible. + - We want to write the majority of code in C#. - Never skip the shim and P/Invoke directly to the underlying platform API. It's easy to assume something is safe/guaranteed when it isn't. -- Don't cheat and take advantage of coincidental agreement between one flavor's ABI and the shim's ABI. +- Don't cheat and take advantage of coincidental agreement between one flavor's ABI and the shim's ABI. - Use PascalCase in a style closer to Win32 than libc. - If an export point has a 1:1 correspondence to the platform API, then name it after the platform API in PascalCase (e.g. stat -> Stat, fstat -> FStat). - If an export is not 1:1, then spell things out as we typically would in dotnet/runtime code (i.e. don't use abbreviations unless they come from the underlying API. diff --git a/docs/design/coreclr/botr/corelib.md b/docs/design/coreclr/botr/corelib.md index b23ddba8143a2..288e4be0353b7 100644 --- a/docs/design/coreclr/botr/corelib.md +++ b/docs/design/coreclr/botr/corelib.md @@ -201,7 +201,7 @@ Here's a real-world example from the `String` class: ```CSharp public partial sealed class String -{ +{ [MethodImpl(MethodImplOptions.InternalCall)] private extern string? 
IsInterned();
diff --git a/docs/design/coreclr/botr/method-descriptor.md b/docs/design/coreclr/botr/method-descriptor.md
index 7d3f24ccf40f1..453e850f1d7ed 100644
--- a/docs/design/coreclr/botr/method-descriptor.md
+++ b/docs/design/coreclr/botr/method-descriptor.md
@@ -1,4 +1,4 @@
-Method Descriptor
+Method Descriptor
=================

Author: Jan Kotas ([@jkotas](https://github.com/jkotas)) - 2006
diff --git a/docs/design/coreclr/botr/readytorun-format.md b/docs/design/coreclr/botr/readytorun-format.md
index 8db3423dbadfe..ff48f53c16c30 100644
--- a/docs/design/coreclr/botr/readytorun-format.md
+++ b/docs/design/coreclr/botr/readytorun-format.md
@@ -9,7 +9,7 @@ Revisions:
# Introduction

This document describes ReadyToRun format 3.1 implemented in CoreCLR as of June 2019 and not yet
-implemented proposed extensions 4.1 for the support of composite R2R file format.
+implemented proposed extensions 4.1 for the support of composite R2R file format.

**Composite R2R file format** has basically the same structure as the traditional R2R file format defined in earlier revisions except that the output file represents a larger number of input MSIL assemblies compiled together as a logical unit.
@@ -320,8 +320,8 @@ basic encoding, with extended encoding for large values).

## ReadyToRunSectionType.RuntimeFunctions

-This section contains a sorted array of `RUNTIME_FUNCTION` entries that describe all code blocks in the image with pointers to their unwind info.
-Despite the name, these code blocks might represent a method body, or just a part of it (e.g. a funclet) that requires its own unwind data.
+This section contains a sorted array of `RUNTIME_FUNCTION` entries that describe all code blocks in the image with pointers to their unwind info.
+Despite the name, these code blocks might represent a method body, or just a part of it (e.g. a funclet) that requires its own unwind data.

The standard Windows xdata/pdata format is used. ARM format is used for x86 to compensate for the lack of x86 unwind info standard. The unwind info blob is immediately followed by the GC info blob. The encoding slightly differs for amd64
diff --git a/docs/design/coreclr/botr/shared-generics.md b/docs/design/coreclr/botr/shared-generics.md
index 6b23563ebf223..367d2caa6d8b3 100644
--- a/docs/design/coreclr/botr/shared-generics.md
+++ b/docs/design/coreclr/botr/shared-generics.md
@@ -47,7 +47,7 @@ This feature is currently only supported for instantiations over reference types

The dictionary used by any given generic method is pointed at by the `m_pPerInstInfo` field on the `InstantiatedMethodDesc` structure of that method. It's a direct pointer to the contents of the generic dictionary data.

-On generic types, there's an extra level of indirection: the `m_pPerInstInfo` field on the `MethodTable` structure is a pointer to a table of dictionaries, and each entry in that table is a pointer to the actual generic dictionary data. This is because types have inheritance, and derived generic types inherit the dictionaries of their base types.
+On generic types, there's an extra level of indirection: the `m_pPerInstInfo` field on the `MethodTable` structure is a pointer to a table of dictionaries, and each entry in that table is a pointer to the actual generic dictionary data. This is because types have inheritance, and derived generic types inherit the dictionaries of their base types.
Here's an example:

```c#
diff --git a/docs/design/coreclr/botr/type-system.md b/docs/design/coreclr/botr/type-system.md
index dba3a0e22fa7f..d111fd79f03f7 100644
--- a/docs/design/coreclr/botr/type-system.md
+++ b/docs/design/coreclr/botr/type-system.md
@@ -1,4 +1,4 @@
-Type System Overview
+Type System Overview
====================

Author: David Wrighton ([@davidwrighton](https://github.com/davidwrighton)) - 2010
diff --git a/docs/design/coreclr/botr/vectors-and-intrinsics.md b/docs/design/coreclr/botr/vectors-and-intrinsics.md
index 0e0848950c2c0..2688db9ca376b 100644
--- a/docs/design/coreclr/botr/vectors-and-intrinsics.md
+++ b/docs/design/coreclr/botr/vectors-and-intrinsics.md
@@ -3,7 +3,7 @@ Vectors and Hardware Intrinsics Support
---

# Introduction
-The CoreCLR runtime has support for several varieties of hardware intrinsics, and various ways to compile code which uses them. This support varies by target processor, and the code produced depends on how the jit compiler is invoked. This document describes the various behaviors of intrinsics in the runtime, and concludes with implications for developers working on the runtime and libraries portions of the runtime.
+The CoreCLR runtime has support for several varieties of hardware intrinsics, and various ways to compile code which uses them. This support varies by target processor, and the code produced depends on how the jit compiler is invoked. This document describes the various behaviors of intrinsics in the runtime, and concludes with implications for developers working on the runtime and libraries portions of the runtime.

# Acronyms and definitions
| Acronym | Definition
@@ -44,8 +44,8 @@ There are 2 different implementations of AOT compilation under development at th

### Code written in System.Private.CoreLib.dll
#### Crossgen implementation rules
-- Any code which uses `Vector<T>` will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_SIMD_NGEN_DISALLOWED`)
-- Code which uses Sse and Sse2 platform hardware intrinsics is always generated as it would be at jit time.
- Code which uses Sse3, Ssse3, Sse41, Sse42, Popcnt, Pclmulqdq, and Lzcnt instruction sets will be generated, but the associated IsSupported check will be a runtime check. See `FilterNamedIntrinsicMethodAttribs` for details on how this is done.
- Code which uses other instruction sets will be generated as if the processor does not support that instruction set. (For instance, a usage of Avx2.IsSupported in CoreLib will generate native code where it unconditionally returns false, and then if and when tiered compilation occurs, the function may be rejitted and have code where the property returns true.)
- Non-platform intrinsics which require more hardware support than the minimum supported hardware capability will not take advantage of that capability. In particular the code generated for `Vector2/3/4.Dot`, and `Math.Round`, and `MathF.Round`. See `FilterNamedIntrinsicMethodAttribs` for details. MethodImplOptions.AggressiveOptimization may be used to disable precompilation of this sub-par code.
@@ -58,8 +58,8 @@ The rules here provide the following characteristics.
- AOT generated code which could take advantage of more advanced hardware support experiences a performance penalty until rejitted. (If a customer chooses to disable tiered compilation, then customer code may always run slowly).

#### Code review rules for code written in System.Private.CoreLib.dll
-- Any use of a platform intrinsic in the codebase MUST be wrapped with a call to the associated IsSupported property. This wrapping MUST be done within the same function that uses the hardware intrinsic, and MUST NOT be in a wrapper function unless it is one of the intrinsics that are enabled by default for crossgen compilation of System.Private.CoreLib (See list above in the implementation rules section).
-- Within a single function that uses platform intrinsics, it must behave identically regardless of whether IsSupported returns true or not. This rule is required as code inside of an IsSupported check that calls a helper function cannot assume that the helper function will itself see its use of the same IsSupported check return true. This is due to the impact of tiered compilation on code execution within the process.
- Excessive use of intrinsics may cause startup performance problems due to additional jitting, or may not achieve desired performance characteristics due to suboptimal codegen.

ACCEPTABLE Code

@@ -130,7 +130,7 @@ public class BitOperations
    of this method may be compiled as if the Avx2 feature is not available, and is not reliably rejitted at the
    same time as the PopCount function.

-    As a special note, on the x86 and x64 platforms, this generally unsafe pattern may be used
+    As a special note, on the x86 and x64 platforms, this generally unsafe pattern may be used
    with the Sse, Sse2, Sse3, Ssse3, Sse41 and Sse42 instruction sets as those instruction sets are treated specially by
    both crossgen1 and crossgen2 when compiling System.Private.CoreLib.dll.
}
```

### Code written in other assemblies (both first and third party)
#### Crossgen implementation rules
-- Any code which uses an intrinsic from the `System.Runtime.Intrinsics.Arm` or `System.Runtime.Intrinsics.X86` namespace will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_HWINTRINSIC_NGEN_DISALLOWED`)
-- Any code which uses `Vector<T>` will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_SIMD_NGEN_DISALLOWED`)
-- Any code which uses `Vector64<T>`, `Vector128<T>` or `Vector256<T>` will not be compiled AOT.
(See code which throws a TypeLoadException using `IDS_EE_HWINTRINSIC_NGEN_DISALLOWED`)
+- Any code which uses an intrinsic from the `System.Runtime.Intrinsics.Arm` or `System.Runtime.Intrinsics.X86` namespace will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_HWINTRINSIC_NGEN_DISALLOWED`)
+- Any code which uses `Vector<T>` will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_SIMD_NGEN_DISALLOWED`)
+- Any code which uses `Vector64<T>`, `Vector128<T>` or `Vector256<T>` will not be compiled AOT. (See code which throws a TypeLoadException using `IDS_EE_HWINTRINSIC_NGEN_DISALLOWED`)
- Non-platform intrinsics which require more hardware support than the minimum supported hardware capability will not take advantage of that capability. In particular the code generated for Vector2/3/4 is sub-optimal. MethodImplOptions.AggressiveOptimization may be used to disable compilation of this sub-par code.

#### Characteristics which result from rules
The rules here provide the following characteristics.
- Use of platform specific hardware intrinsics causes runtime jit and startup time concerns.
- Use of `Vector<T>` causes runtime jit and startup time concerns
-- AOT generated code which could take advantage of more advanced hardware support experiences a performance penalty until rejitted. (If a customer chooses to disable tiered compilation, then customer code may always run slowly).

#### Code review rules for use of platform intrinsics
- Any use of a platform intrinsic in the codebase SHOULD be wrapped with a call to the associated IsSupported property. This wrapping may be done within the same function that uses the hardware intrinsic, but this is not required as long as the programmer can control all entrypoints to a function that uses the hardware intrinsic.
@@ -183,7 +183,7 @@ Since System.Private.CoreLib.dll is known to be code reviewed with the code revi

# Mechanisms in the JIT to generate correct code to handle varied instruction set support

-The JIT receives flags which instruct it on what instruction sets are valid to use, and has access to a new jit interface api `notifyInstructionSetUsage(isa, bool supportBehaviorRequired)`.
+The JIT receives flags which instruct it on what instruction sets are valid to use, and has access to a new jit interface api `notifyInstructionSetUsage(isa, bool supportBehaviorRequired)`.

The notifyInstructionSetUsage api is used to notify the AOT compiler infrastructure that the code may only execute if the runtime environment of the code is exactly the same as the boolean parameter indicates it should be. For instance, if `notifyInstructionSetUsage(Avx, false)` is used, then the code generated must not be used if the `Avx` instruction set is useable. Similarly `notifyInstructionSetUsage(Avx, true)` will indicate that the code may only be used if the `Avx` instruction set is available.
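To make the review rules above concrete, here is a minimal, self-contained C# sketch of the guarded-intrinsic pattern they describe. The `PopCountHelper` type, the `PopCount64` name, and the software fallback are illustrative assumptions, not code from this repository:

```csharp
using System.Runtime.Intrinsics.X86;

public static class PopCountHelper
{
    // The intrinsic is wrapped in its own IsSupported check inside the same
    // method that uses it, and the method behaves identically whether or not
    // the check returns true, as the review rules require.
    public static int PopCount64(ulong value)
    {
        if (Popcnt.X64.IsSupported)
        {
            return (int)Popcnt.X64.PopCount(value);
        }

        // Software fallback with the same observable behavior.
        int count = 0;
        while (value != 0)
        {
            value &= value - 1; // clear the lowest set bit
            count++;
        }
        return count;
    }
}
```

Because the check and the use live in the same function, the JIT can treat `IsSupported` as a constant and drop the dead branch, and AOT-compiled code falls back safely on hardware without the instruction set.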
diff --git a/docs/design/coreclr/botr/xplat-minidump-generation.md b/docs/design/coreclr/botr/xplat-minidump-generation.md
index 38b6120666273..46fd6bdf37276 100644
--- a/docs/design/coreclr/botr/xplat-minidump-generation.md
+++ b/docs/design/coreclr/botr/xplat-minidump-generation.md
@@ -46,7 +46,7 @@ As of .NET 5.0, createdump is supported on MacOS but instead of the MachO dump f

### Windows ###

-As of .NET 5.0, createdump and the below configuration environment variables are supported on Windows. It is implemented using the Windows MiniDumpWriteDump API. This allows consistent crash/unhandled exception dumps across all of our platforms.
+As of .NET 5.0, createdump and the below configuration environment variables are supported on Windows. It is implemented using the Windows MiniDumpWriteDump API. This allows consistent crash/unhandled exception dumps across all of our platforms.

# Configuration/Policy #
diff --git a/docs/design/coreclr/jit/jit-call-morphing.md b/docs/design/coreclr/jit/jit-call-morphing.md
index cf46fa674b14b..c454690e21df2 100644
--- a/docs/design/coreclr/jit/jit-call-morphing.md
+++ b/docs/design/coreclr/jit/jit-call-morphing.md
@@ -17,7 +17,7 @@ post/pre increment, perhaps like this: `Foo(j, a[j++])`.  Here `j` is updated vi
when the second arg is evaluated, so the earlier uses of `j` would need to be evaluated and saved
in a new LclVar.

-
+
One simple approach would be to create new single definition, single use LclVars for every argument that is passed. This would preserve the evaluation order. However, it would potentially create hundreds of LclVar for moderately sized methods and that would overflow the limited number of
tracked local variables in the JIT. One observation is that many arguments to methods are
either constants or LclVars and can be set up anytime we want. They usually will not need a new
LclVar to preserve the order of evaluation rule.

-
+
Each argument is an arbitrary expression tree. The JIT tracks a summary of observable side-effects
using a set of five bit flags in every GenTree node: `GTF_ASG`, `GTF_CALL`, `GTF_EXCEPT`,
`GTF_GLOB_REF`, and `GTF_ORDER_SIDEEFF`. These flags are propagated up the tree so that the top node has a particular
diff --git a/docs/design/coreclr/jit/lsra-detail.md b/docs/design/coreclr/jit/lsra-detail.md
index fce0d4d54dff6..a637b8ac6982c 100644
--- a/docs/design/coreclr/jit/lsra-detail.md
+++ b/docs/design/coreclr/jit/lsra-detail.md
@@ -309,7 +309,7 @@ After LSRA, the graph has the following properties:

  - However, if such a node is constrained to a set of registers,
    and its current location does not satisfy that requirement, LSRA
-    must insert a `GT_COPY` node between the node and its parent.
+    must insert a `GT_COPY` node between the node and its parent.
    The `_gtRegNum` on the `GT_COPY` node must satisfy the register
    requirement of the parent.

@@ -1088,11 +1088,11 @@ term "EH Var" means a `lclVar` marked `lvLiveInOutOfHndlr`):

- Adjust the heuristics:

-  1. For determining whether an EH var should be a candidate for register allocation,
+  1. For determining whether an EH var should be a candidate for register allocation,
     e.g. if the defs outweigh the uses.

  2. For determining when a definition of an EH var should be only stored to the stack,
-     rather than also remaining live in the register.
+     rather than also remaining live in the register.

  - If the weight of the defs exceeds the weight of the blocks with successors in exception regions, consider spilling the `lclVar` to the stack only at those boundaries.
@@ -1241,7 +1241,7 @@
kill site.

Issue [\#9767](https://github.com/dotnet/runtime/issues/9767) captures the issue that the "spill always" stress mode, `LSRA_SPILL_ALWAYS`, `COMPlus_JitStressRegs=0x800` doesn't work properly.

-Issue [\#6261](https://github.com/dotnet/runtime/issues/6261) has to do with `RegOptional`
+Issue [\#6261](https://github.com/dotnet/runtime/issues/6261) has to do with `RegOptional`
`RefPositions` that are marked as `copyReg` or `moveReg`. See the notes on this issue; I don't think such cases should arise, but there may be some cleanup needed here.

Issue [\#5793](https://github.com/dotnet/runtime/issues/5793) suggests adding a stress mode that
allocates registers for multi-reg nodes in the reverse of the ABI requirements.

Issue [#10691](https://github.com/dotnet/runtime/issues/10691) suggests adding a stress mode that
-deliberately trashes registers that are not currently occupied (e.g. at block boundaries).
+deliberately trashes registers that are not currently occupied (e.g. at block boundaries).

References
----------
@@ -1284,4 +1284,4 @@ References
7. Yatsina, M. "LLVM Greedy Register Allocator," LLVM Dev Meeting, April 2018.

-   (Last retrieved July 2020)
\ No newline at end of file
+   (Last retrieved July 2020)
diff --git a/docs/design/coreclr/jit/ryujit-tutorial.md b/docs/design/coreclr/jit/ryujit-tutorial.md
index 050c11495b6a8..69e90580fbfa4 100644
--- a/docs/design/coreclr/jit/ryujit-tutorial.md
+++ b/docs/design/coreclr/jit/ryujit-tutorial.md
@@ -606,7 +606,7 @@ public static int PopCount(ulong bitVectorArg)
#### Notes
The sample I'm going to walk through implements support for pop count (counting the number of '1' bits in a 64-bit value).

-
+
We're going to start by assuming that we have a method with a known signature that implements PopCount. Here's the implementation we're going to use. It simply takes the input value, and keeps anding with one, and then shifting right.
We're first going to simply recognize the name and signature, and replace the method call with a simple PopCnt IR node.
diff --git a/docs/design/coreclr/profiling/Profiler Attach on CoreCLR.md b/docs/design/coreclr/profiling/Profiler Attach on CoreCLR.md
index 1e494a79f364a..6a724e4fdc2f2 100644
--- a/docs/design/coreclr/profiling/Profiler Attach on CoreCLR.md
+++ b/docs/design/coreclr/profiling/Profiler Attach on CoreCLR.md
@@ -13,7 +13,7 @@ Attaching a profiler to a running CoreCLR process involves sending a message fro
2) `uint attachTimeout` - (Required) A timeout that informs the runtime how long to wait while attempting to attach. This does not impact the timeout of trying to send the attach message.
3) `Guid profilerGuid` - (Required) The profiler's GUID to use when initializing.
4) `string profilerPath` - (Required) The path to the profiler on disk.
-5) `byte[] additionalData` - (Optional) A data blob that will be passed to `ICorProfilerCallback3::InitializeForAttach` as `pvClientData`.
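From a managed trigger process, one way to send this attach message is the `DiagnosticsClient.AttachProfiler` method in the `Microsoft.Diagnostics.NETCore.Client` library. The sketch below assumes that package; the profiler GUID and path are hypothetical placeholders:

```csharp
using System;
using Microsoft.Diagnostics.NETCore.Client;

public static class AttachTrigger
{
    // Sends the profiler attach message described above to the target process
    // over the diagnostics IPC channel.
    public static void Attach(int targetPid)
    {
        var client = new DiagnosticsClient(targetPid);
        client.AttachProfiler(
            TimeSpan.FromSeconds(10),                          // attach timeout
            new Guid("8A8CC829-CCF2-49FE-BBAE-0F022228071A"),  // hypothetical profiler GUID
            @"C:\profilers\MyProfiler.dll",                    // hypothetical profiler path
            null);                                             // optional data blob (pvClientData)
    }
}
```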
diff --git a/docs/design/coreclr/profiling/Profiler Breaking Changes.md b/docs/design/coreclr/profiling/Profiler Breaking Changes.md index 07c54c7277ff7..0e6d5e2444b92 100644 --- a/docs/design/coreclr/profiling/Profiler Breaking Changes.md +++ b/docs/design/coreclr/profiling/Profiler Breaking Changes.md @@ -4,4 +4,4 @@ Over time we will need to modify the Profiler APIs, this document will serve as 1. Code Versioning introduced changes documented [here](../../features/code-versioning-profiler-breaking-changes.md) 2. The work to allow adding new types and methods after module load means ICorProfilerInfo7::ApplyMetadata will now potentially trigger a GC, and will not be callable in situations where a GC can not happen (for example ICorProfilerCallback::RootReferences). -3. As part of the work to allow ReJIT on attach ReJITted methods will no longer be inlined (ever). Since the inlining is blocked there won't be a `ICorProfilerCallback::JITInlining` callback. \ No newline at end of file +3. As part of the work to allow ReJIT on attach ReJITted methods will no longer be inlined (ever). Since the inlining is blocked there won't be a `ICorProfilerCallback::JITInlining` callback. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md b/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md index 93335dd51fecb..53f17e2316c3d 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md @@ -7,7 +7,7 @@ Profiler attach is a feature that allows you to attach a profiler to an already Please note! You can't just take any profiler you bought and suddenly be able to attach it to a running application. The profiler must be built with "attachability" in mind. So if you're a profiler developer looking to pump some attachability into your product, read on--this article is for you. Everyone else, this article will probably be less useful--but just as riveting. -# +# # The Players @@ -19,17 +19,17 @@ In order to force your profiler DLL to load into the target profilee process, yo # Inside the Trigger Process -Your trigger uses a simple API method, AttachProfiler, to request the target process to load your profiler. Where is this method defined? Well, it doesn't make much sense to put it on ICorProfilerInfo, since that interface is only available to a profiler after it's been loaded. You could imagine a C export from mscoree.dll. But because of in-process side-by-side CLR instances, we're moving away from mscoree.dll exports to a COM-based interface model called "metahost". +Your trigger uses a simple API method, AttachProfiler, to request the target process to load your profiler. Where is this method defined? Well, it doesn't make much sense to put it on ICorProfilerInfo, since that interface is only available to a profiler after it's been loaded. You could imagine a C export from mscoree.dll. But because of in-process side-by-side CLR instances, we're moving away from mscoree.dll exports to a COM-based interface model called "metahost". ## Meta-whos-its? Whereas the "hosting" interfaces enable one to host and manage a CLR in a process, the "metahost" interfaces allow one to manage multiple CLRs that may be installed onto a machine or loaded into a single process. Here's a high-level view of how you navigate your way through metahost to find AttachProfiler() (there’s a pointer to actual sample code below). 
-- Get ICLRMetaHost -- Enumerate the CLRs loaded into the target process -- Get ICLRRuntimeInfo for the particular CLR in the target process you want to profile -- Get the corresponding ICLRProfiling -- Call ICLRProfiling::AttachProfiler +- Get ICLRMetaHost +- Enumerate the CLRs loaded into the target process +- Get ICLRRuntimeInfo for the particular CLR in the target process you want to profile +- Get the corresponding ICLRProfiling +- Call ICLRProfiling::AttachProfiler ## Users and Integrity @@ -57,7 +57,7 @@ From your InitializeForAttach implementation, your profiler will call SetEventMa It was impossible to enable all profiling scenarios for attach in the time we had for the V4 release. So only profilers that do **sampling** and **memory** analysis will function properly after attaching to a live process. Attempts to use other profiling APIs after attach will be met with CORPROF\_E\_UNSUPPORTED\_FOR\_ATTACHING\_PROFILER. -### +### ## Specific Callback Limitations @@ -67,14 +67,14 @@ When your attaching profiler calls SetEventMask, you will be limited to only tho Most of the ICorProfilerInfo\* methods are available to your attaching profiler, however some are not--particularly those involved in **IL rewriting**. Here's a list of all ICorProfilerInfo\* methods NOT supported for attaching profilers: -- GetILFunctionBody -- GetILFunctionBodyAllocator -- SetILFunctionBody -- SetILInstrumentedCodeMap -- SetEnterLeaveFunctionHooks\* -- SetFunctionIDMapper\* -- GetNotifiedExceptionClauseInfo -- All methods related to Enter/Leave/Tailcall +- GetILFunctionBody +- GetILFunctionBodyAllocator +- SetILFunctionBody +- SetILInstrumentedCodeMap +- SetEnterLeaveFunctionHooks\* +- SetFunctionIDMapper\* +- GetNotifiedExceptionClauseInfo +- All methods related to Enter/Leave/Tailcall It's expected that future releases of the CLR will enable more API methods for use by attaching profilers. @@ -84,9 +84,9 @@ It's expected that future releases of the CLR will enable more API methods for u To understand limitations around the GC modes, here's a quick review of the GC modes an app can run under: -- **Workstation Blocking mode**. The thread that triggered the GC performs the GC while all other threads executing managed code must wait. -- **Workstation Concurrent / Background mode (the default)**. Concurrent GC (V1 & V2) allows portions of a full GC to execute while other threads are allowed to run. Background GC (its replacement in V4) takes it one step further, and also allows an ephemeral GC (i.e., gen 0 or gen 1) to execute while a gen 2 GC is executing. -- **Server mode**. Hosts like ASP.NET may choose to enable server mode which creates a heap + dedicated GC thread per CPU. This allows GCs to be fanned out to multiple threads. +- **Workstation Blocking mode**. The thread that triggered the GC performs the GC while all other threads executing managed code must wait. +- **Workstation Concurrent / Background mode (the default)**. Concurrent GC (V1 & V2) allows portions of a full GC to execute while other threads are allowed to run. Background GC (its replacement in V4) takes it one step further, and also allows an ephemeral GC (i.e., gen 0 or gen 1) to execute while a gen 2 GC is executing. +- **Server mode**. Hosts like ASP.NET may choose to enable server mode which creates a heap + dedicated GC thread per CPU. This allows GCs to be fanned out to multiple threads. 
Of course, [Maoni's blog](https://devblogs.microsoft.com/dotnet/author/maoni/) is required reading for anyone who wants to understand how the GC works.

@@ -96,12 +96,12 @@ So here's the catch.  What if a V4 app starts up in background GC mode _without_

Of course, you could forcibly turn off concurrent / background mode every time the app starts up via a config file:

|
-\<configuration\>
- \<runtime\>
- \<gcConcurrent enabled="false"/\>
- \</runtime\>
+\<configuration\>
+ \<runtime\>
+ \<gcConcurrent enabled="false"/\>
+ \</runtime\>
 \</configuration\>
 |
diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md b/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md
index d2db6ca613012..10847f05b9c74 100644
--- a/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md
+++ b/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md
@@ -11,12 +11,12 @@ A profiler that loads on startup of an application has the option to know the en

[NoBirthAnnouncement](media/NoBirthAnnouncement.JPG) Drawing by Magdalena Hermawan

-
+
There are two fundamental ways your profiler can catch up on the current state of an application:

-- Lazy catch-up—as the profiler encounters new IDs, the profiler queries information about those IDs as it needs them, rather than assuming it has a full cache that’s always built up as the IDs are first created. This is analogous to Dorothy meeting a new grown-up, and gracefully accepting the fact that that person exists.
-- Enumeration—for certain kinds of IDs, the profiler can (at attach time) request a complete list of the currently active IDs and query information about them at that time. Sort of like Dorothy first going to the Oz City Hall and looking up the birth records for everyone.

Lazy catch-up is fairly self-explanatory. For example, if your sampling profiler encounters an IP in a FunctionID you’ve never seen before, just look up whatever info you need about that FunctionID the first time you encounter it, rather than assuming you’d already built up a cache when the function was first JITted. And if you discover that FunctionID resides in a module you’ve never seen before, then just look up whatever info you need about that ModuleID at that point, rather than assuming you already have a complete cache of all modules. Many of you are already doing something like this today if you support sampling against regular NGENd images (since you don’t get JIT notifications of those functions anyway).

@@ -26,8 +26,8 @@ Enumeration, on the other hand, has some caveats and is worthwhile to describe i

Some kinds of IDs have new enumerator methods as part of the profiling API. In particular:

-- ICorProfilerInfo3::EnumModules
-- ICorProfilerInfo3::EnumJITedFunctions
+- ICorProfilerInfo3::EnumModules
+- ICorProfilerInfo3::EnumJITedFunctions

Your profiler calls these methods, and they return a standard enumerator you use to iterate through all of the currently-loaded IDs of that type.
It’s worth noting that EnumJITedFunctions only enumerates FunctionIDs for which you would receive JITCompilationStarted/Finished events, and will not include FunctionIDs from NGENd modules. @@ -39,24 +39,24 @@ As you may recall, once your profiler is attached to the process, the CLR calls Bad timeline (loading; enumerating too soon): -1. Profiler attaches -2. Profiler calls EnumModules -3. Module starts to load -4. ModuleID is now enumerable -5. ModuleLoadFinished event would fire here if events were enabled (but they’re not yet!) -6. CLR enables events +1. Profiler attaches +2. Profiler calls EnumModules +3. Module starts to load +4. ModuleID is now enumerable +5. ModuleLoadFinished event would fire here if events were enabled (but they’re not yet!) +6. CLR enables events The problem is that the profiler calls EnumModules too early. If your profiler only calls EnumModules after CLR enables events, then you’re assured of either seeing a ModuleID via EnumModules or via a ModuleLoad event. In the above scenario, your profiler might as well have never done enumeration at all, since it will still not be notified of the ModuleID before it comes across that ModuleID in action later on. It gets even worse for modules that unload: Bad timeline (unloading; enumerating too soon): -1. Module loads -2. ModuleID is now enumerable -3. Profiler attaches -4. Profiler calls EnumModules (includes the ModuleID) -5. Module starts to unload -6. ModuleUnloadStarted event would fire here if events were enabled (but they’re not yet!) -7. CLR enables events +1. Module loads +2. ModuleID is now enumerable +3. Profiler attaches +4. Profiler calls EnumModules (includes the ModuleID) +5. Module starts to unload +6. ModuleUnloadStarted event would fire here if events were enabled (but they’re not yet!) +7. CLR enables events In the above case, the profiler discovers a ModuleID via EnumModules, but has no idea that the module is now in the process of unloading. So the profiler might query information about the stale ModuleID, potentially causing an AV. Again, this is caused because the profiler called the enumeration API too soon (i.e., before the CLR enabled event callbacks). @@ -68,24 +68,24 @@ When your profiler calls the Enum\* methods, the CLR creates a snapshot of all Bad timeline (loading): -1. Module starts to load -2. ModuleLoadFinished event would fire here if events were enabled (but they’re not yet—no profiler is attached!) -3. Profiler attaches -4. CLR enables events, calls ProfilerAttachComplete() -5. Profiler calls EnumModules -6. ModuleID is now enumerable +1. Module starts to load +2. ModuleLoadFinished event would fire here if events were enabled (but they’re not yet—no profiler is attached!) +3. Profiler attaches +4. CLR enables events, calls ProfilerAttachComplete() +5. Profiler calls EnumModules +6. ModuleID is now enumerable Because 2 comes before 6, it’s possible for a profiler to attach and grab an enumeration in the middle, and thus never hear about a ModuleID (even though the profiler avoided Race #1 from the previous section). Again, an even worse problem occurs for module unloading. Suppose the CLR were to change an ID’s enumerable status to false after sending the unload event. That would also lead to holes: Bad timeline (unloading): -1. Module loads, event would fire if profiler were attached (but it’s not), then ModuleID becomes enumerable -2. Module starts to unload -3. ModuleUnloadStarted event would fire here if events were enabled (but they’re not yet—no profiler is attached!) -4. 
Profiler attaches -5. CLR enables events, calls ProfilerAttachComplete() -6. Profiler calls EnumModules (ModuleID is still enumerable, so profiler discovers ModuleID at this point) -7. ModuleID is no longer enumerable +1. Module loads, event would fire if profiler were attached (but it’s not), then ModuleID becomes enumerable +2. Module starts to unload +3. ModuleUnloadStarted event would fire here if events were enabled (but they’re not yet—no profiler is attached!) +4. Profiler attaches +5. CLR enables events, calls ProfilerAttachComplete() +6. Profiler calls EnumModules (ModuleID is still enumerable, so profiler discovers ModuleID at this point) +7. ModuleID is no longer enumerable Because 3 comes before 7, a profiler could attach in the middle, grab an enumeration, discover the ModuleID via the enumeration, and have no idea that module was in the process of unloading. If the profiler were to use that ModuleID later on, an AV could result. The above led to the following golden rule: @@ -93,10 +93,10 @@ Because 3 comes before 7, a profiler could attach in the middle, grab an enumera In other words, an ID becomes enumerable _before_ the LoadFinished (or JITCompilationFinished) event. And an ID ceases to be enumerable _before_ the UnloadStarted event. Or you can think of it as, “The event is always last”. This eliminates any potential holes. So to be even more explicit, here’s the enumerability vs. event ordering: -1. ID available in enumerations snapped now -2. LoadFinished -3. ID no longer in enumerations snapped now -4. UnloadStarted +1. ID available in enumerations snapped now +2. LoadFinished +3. ID no longer in enumerations snapped now +4. UnloadStarted If an ID is present, the profiler will discover the ID via the enumerator or a LoadFinished event (or both). If an ID is not present, the profiler will either not see the ID via the enumerator or will see an UnloadStarted event (or both). In all cases, the event is more recent, and so the profiler should always trust an event over an enumeration that was generated prior. (More on that last point later.) @@ -104,36 +104,36 @@ The astute reader will notice that what we’ve done here is trade one race for Good timeline (loading with duplicate): -1. Module starts to load -2. ModuleID is now enumerable -3. Profiler attaches -4. CLR enables events, calls ProfilerAttachComplete() -5. Profiler calls EnumModules -6. Profiler receives ModuleLoadFinished +1. Module starts to load +2. ModuleID is now enumerable +3. Profiler attaches +4. CLR enables events, calls ProfilerAttachComplete() +5. Profiler calls EnumModules +6. Profiler receives ModuleLoadFinished At first it might seem a little strange. The enumerator contains the ModuleID, so the profiler sees that the module is loaded. But then the profiler receives a ModuleLoadFinished event, which might seem odd, since the enumerator implied the module was already loaded. This is what I mean by “duplicate”—the profiler is notified of a ModuleID twice (once via the enumeration, and once via the event). The profiler will need to be resilient to this. Although it’s a bit awkward, it’s better than the alternative of a hole, since the profiler would have no way to know the hole occurred. Unloading has a similar situation: Good timeline (unloading with duplicate): -1. Module loads, event would have fired if profiler were attached (but it’s not), ModuleID becomes enumerable -2. Module starts to unload -3. ModuleID is no longer enumerable -4. Profiler attaches -5. 
CLR enables events, calls ProfilerAttachComplete() -6. Profiler calls EnumModules -7. Profiler receives ModuleUnloadStarted event +1. Module loads, event would have fired if profiler were attached (but it’s not), ModuleID becomes enumerable +2. Module starts to unload +3. ModuleID is no longer enumerable +4. Profiler attaches +5. CLR enables events, calls ProfilerAttachComplete() +6. Profiler calls EnumModules +7. Profiler receives ModuleUnloadStarted event In step 6, the profiler does not see the unloading ModuleID (since it’s no longer enumerable). But in step 7 the profiler is notified that the ModuleID is unloading. Perhaps it’s a bit awkward that the profiler would be told that a seemingly nonexistent ModuleID is unloading. But again, this is better than the alternative, where a profiler finds an unloading ID in the enumeration, and is never told that the ModuleID got unloaded. One more case that’s worthwhile to bring out occurs when we move the profiler attach a bit earlier in the sequence. Good timeline (unloading without duplicate): -1. Module loads, event would fire if profiler were attached, ModuleID becomes enumerable -2. Module starts to unload -3. Profiler attaches -4. CLR enables events, calls ProfilerAttachComplete() -5. Profiler calls EnumModules (ModuleID is still present in the enumeration) -6. ModuleID is no longer enumerable -7. Profiler receives ModuleUnloadStarted event +1. Module loads, event would fire if profiler were attached, ModuleID becomes enumerable +2. Module starts to unload +3. Profiler attaches +4. CLR enables events, calls ProfilerAttachComplete() +5. Profiler calls EnumModules (ModuleID is still present in the enumeration) +6. ModuleID is no longer enumerable +7. Profiler receives ModuleUnloadStarted event Here the profiler discovers the ModuleID exists in step 5 (as the ModuleID is still enumerable at that point), but the profiler almost immediately after discovers that the module is unloading in step 7. As stated above, events are more recent, and should always take precedence over enumerations that were generated prior. This could get a bit tricky, though, as the profiler generates an enumeration before it iterates over the enumeration. In the above sequence, the enumeration is generated in step 5. However, the profiler could be iterating though the generated enumeration for quite some time, and might not come across the unloading ModuleID until after step 7 (multiple threads means fun for everyone!). For this reason, it’s important for the profiler to give precedence to events that occur after the enumeration was _generated_, even though iteration over that enumeration might occur later. @@ -151,7 +151,7 @@ It may be beneficial to program your profiler such that, upon attaching to the p It’s worth reiterating a limitation I stated in the first attach post (linked above): the ObjectAllocated() callback is unavailable to profilers that attach to running processes. Therefore, any logic your profiler has that assumes it gets all the ObjectAllocated() callbacks will need to be addressed. Any objects newly allocated since the last GC may still be unknown to your profiler until it comes across their references via GC callbacks during the next GC (unless your profiler comes across those objects in other ways—example: as parameters to methods you hook with the Enter/Leave/Tailcall probes). - + OK, that about covers the first steps your profiler should take once it attaches to a running process. 
It will either need to use lazy catch-up or the catch-up enumerations (or, quite likely, a combination of both). When using the enumerations, be careful to avoid holes (by calling the enumeration methods from inside ProfilerAttachComplete()), and be resilient to receiving information duplicated across the enumeration and the load / unload events. For memory profilers, be wary of GCs already in progress at the time your profiler attaches, and consider inducing your own GC at attach-time to build your initial cache of GC objects. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/CORPROF_E_UNSUPPORTED_CALL_SEQUENCE.md b/docs/design/coreclr/profiling/davbr-blog-archive/CORPROF_E_UNSUPPORTED_CALL_SEQUENCE.md index c6feb97c779a9..b2a900ca8757e 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/CORPROF_E_UNSUPPORTED_CALL_SEQUENCE.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/CORPROF_E_UNSUPPORTED_CALL_SEQUENCE.md @@ -11,24 +11,24 @@ On the other hand, if you're hijacking or otherwise calling ICorProfilerInfo fun In 2.0 we've added some simple checks to help you avoid this problem. If you call an unsafe ICorProfilerInfo function asynchronously, instead of crossing its fingers and trying, it will fail with CORPROF\_E\_UNSUPPORTED\_CALL\_SEQUENCE. The general rule of thumb is, nothing is safe to call asynchronously. But here are the exceptions that are safe, and that we specifically allow to be called asynchronously: -- GetEventMask/SetEventMask -- GetCurrentThreadID -- GetThreadContext -- GetThreadAppDomain -- GetFunctionFromIP -- GetFunctionInfo/GetFunctionInfo2 -- GetCodeInfo/GetCodeInfo2 -- GetModuleInfo -- GetClassIDInfo/GetClassIDInfo2 -- IsArrayClass -- SetFunctionIDMapper -- DoStackSnapshot +- GetEventMask/SetEventMask +- GetCurrentThreadID +- GetThreadContext +- GetThreadAppDomain +- GetFunctionFromIP +- GetFunctionInfo/GetFunctionInfo2 +- GetCodeInfo/GetCodeInfo2 +- GetModuleInfo +- GetClassIDInfo/GetClassIDInfo2 +- IsArrayClass +- SetFunctionIDMapper +- DoStackSnapshot There are also a few things to keep in mind: -1. ICorProfilerInfo calls made from within the fast-path Enter/Leave callbacks are considered asynchronous. (Though ICorProfilerInfo calls made from within the _slow_-path Enter/Leave callbacks are considered synchronous.) See the blog entries [here](ELT - The Basics.md) and [here](http://blogs.msdn.com/jkeljo/archive/2005/08/11/450506.aspx) for more info on fast / slow path. -2. ICorProfilerInfo calls made from within instrumented code (i.e., IL you've rewritten to call into your profiler and then into ICorProfilerInfo) are considered asynchronous. -3. Calls made inside your FunctionIDMapper hook are considered to be synchronous. -4. Calls made on threads created by your profiler, are always considered to be synchronous. (This is because there's no danger of conflicts resulting from interrupting and then re-entering the CLR on that thread, since a profiler-created thread was not in the CLR to begin with.) -5. Calls made inside a StackSnapshotCallback are considered to be synchronous iff the call to DoStackSnapshot was synchronous. +1. ICorProfilerInfo calls made from within the fast-path Enter/Leave callbacks are considered asynchronous. (Though ICorProfilerInfo calls made from within the _slow_-path Enter/Leave callbacks are considered synchronous.) See the blog entries [here](ELT - The Basics.md) and [here](http://blogs.msdn.com/jkeljo/archive/2005/08/11/450506.aspx) for more info on fast / slow path. +2. 
ICorProfilerInfo calls made from within instrumented code (i.e., IL you've rewritten to call into your profiler and then into ICorProfilerInfo) are considered asynchronous. +3. Calls made inside your FunctionIDMapper hook are considered to be synchronous. +4. Calls made on threads created by your profiler are always considered to be synchronous. (This is because there's no danger of conflicts resulting from interrupting and then re-entering the CLR on that thread, since a profiler-created thread was not in the CLR to begin with.) +5. Calls made inside a StackSnapshotCallback are considered to be synchronous iff the call to DoStackSnapshot was synchronous. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md index 79ee3e1d291a0..9cda190345082 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md @@ -15,7 +15,7 @@ Environment variables --\> Registry --\> Profiler DLL on File system. The first link in this chain is to check the environment variables inside the process that should be profiled. If you're running the process from a command-prompt, you can just try a "set co" from the command prompt: -| +| ``` **C:\>** set co (blah blah, other vars beginning with "co") diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md index 8f032244926ca..6f3da89e5c5ef 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md @@ -9,7 +9,7 @@ SOS.DLL is a debugger extension DLL that ships with the CLR. You'll find it sit In windbg, you'll need mscorwks.dll to load first, and then you can load SOS. Often, I don't need SOS until well into my debugging session, at which point mscorwks.dll has already been loaded anyway. However, there are some cases where you'd like SOS loaded at the first possible moment, so you can use some of its commands early (like !bpmd to set a breakpoint on a managed method). So a surefire way to get SOS loaded ASAP is to have the debugger break when mscorwks gets loaded (e.g., "sxe ld mscorwks"). Once mscorwks is loaded, you can load SOS using the .loadby command: -| +| ``` 0:000\> **sxe ld mscorwks** 0:000\> g @@ -35,7 +35,7 @@ As far as your profiler is concerned, a FunctionID is just an opaque number. It Ok, so FunctionID = (MethodDesc \*). How does that help you? SOS just so happens to have a command to inspect MethodDescs: !dumpmd. So if you're in a debugger looking at your profiler code that's operating on a FunctionID, it can be beneficial to you to find out which function that FunctionID actually refers to. In the example below, the debugger will break in my profiler's JITCompilationStarted callback and look at the FunctionID. It's assumed that you've already loaded SOS as per above. -| +| ``` 0:000\> bu UnitTestSampleProfiler!SampleCallbackImpl::JITCompilationStarted 0:000\> g @@ -54,7 +54,7 @@ Breakpoint 0 hit The debugger is now sitting at the beginning of my profiler's JITCompilationStarted callback. Let's take a look at the parameters. -| +| ``` 0:000\> dv this = 0x00c133f8 @@ -65,7 +65,7 @@ The debugger is now sitting at the beginning of my profiler's JITCompilationStar Aha, that's the FunctionID about to get JITted.
Now use SOS to see what that function really is. -| +| ``` 0:000\> !dumpmd 0x1e3170 Method Name: test.Class1.Main(System.String[]) @@ -79,7 +79,7 @@ Aha, that's the FunctionID about to get JITted. Now use SOS to see what that function really is. Lots of juicy info here, though the Method Name typically is what helps me the most in my debugging sessions. mdToken tells us the metadata token for this method. MethodTable tells us where another internal CLR data structure is stored that contains information about the class containing the function. In fact, the profiling API's ClassID is simply a MethodTable \*. [Note: the "Class: 001e1288" in the output above is very different from the MethodTable, and thus different from the profiling API's ClassID. Don't let the name fool you!] So we could go and inspect a bit further by dumping information about the MethodTable: -| +| ``` 0:000\> !dumpmt 0x001e3180 EEClass: 001e1288 @@ -126,7 +126,7 @@ It would probably be quicker to list what _isn't_ useful! I encourage you to do !bpmd lets you place a breakpoint on a managed method. Just specify the module name and the fully-qualified method name. For example: -| +| ``` !bpmd MyModule.exe MyNamespace.MyClass.Foo ``` @@ -136,7 +136,7 @@ If the method hasn't jitted yet, no worries. A "pending" breakpoint is placed. !PrintException: If you use this without arguments you get to see a pretty-printing of the last outstanding managed exception on the thread; or specify a particular Exception object's address. - + Ok, that about does it for SOS. Hopefully this info can help you track down problems a little faster, or better yet, perhaps this can help you step through and verify your code before problems arise. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md index 2f65767082600..ede4e736d3995 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md @@ -5,9 +5,9 @@ In my initial [post](DoStackSnapshot - Exception Filters.md) about DoStackSnapsh The quick answer is that **nonvolatile (i.e., preserved), integer registers** should be valid. You don't really need many registers to walk the stack anyway. Obviously, you want a good stack pointer and instruction pointer. And hey, a frame pointer is handy when you come across an EBP-based frame in x86 (RBP on x64). These are all included in the set, of course. Specifically by architecture, you can trust these fields in your context: -x86: Edi, Esi, Ebx, Ebp, Esp, Eip -x64: Rdi, Rsi, Rbx, Rbp, Rsp, Rip, R12:R15 +x86: Edi, Esi, Ebx, Ebp, Esp, Eip +x64: Rdi, Rsi, Rbx, Rbp, Rsp, Rip, R12:R15 ia64: IntS0:IntS3, RsBSP, StIFS, RsPFS, IntSp, StIIP, StIPSR - + diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md index 60ce221b46000..6c7e57eb53d05 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md @@ -7,38 +7,38 @@ For those of you diehard C# fans, you might be unaware of the existence of excep First, a little background.
For the full deal, check out the MSDN Library topic on VB.NET's [try/catch/finally statements](http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vblr7/html/vastmTryCatchFinally.asp). But here's an appetizer. In VB.NET you can do this: -``` -Function Negative() As Boolean - Return False -End Function - -Function Positive() As Boolean - Return True -End Function - -Sub Thrower - Throw New Exception -End Sub - -Sub Main() - Try - Thrower() - Catch ex As Exception When Negative() - MsgBox("Negative") - Catch ex As Exception When Positive() - MsgBox("Positive") - End Try -End Sub ``` +Function Negative() As Boolean + Return False +End Function -The filters are the things that come after "When". We all know that, when an exception is thrown, its type must match the type specified in a Catch clause in order for that Catch clause to be executed. "When" is a way to further restrict whether a Catch clause will be executed. Now, not only must the exception's type match, but also the When clause must evaluate to True for that Catch clause to be chosen. In the example above, when we run, we'll skip the first Catch clause (because its filter returned False), and execute the second, thus showing a message box with "Positive" in it. - -The thing you need to realize about DoStackSnapshot's behavior (indeed, CLR in general) is that the execution of a When clause is really a separate function call. In the above example, imagine we take a stack snapshot while inside Positive(). Our managed-only stack trace, as reported by DoStackSnapshot, would then look like this (stack grows up): - -Positive -Main -Thrower -Main - -It's that highlighted Main that seems odd at first. While the exception is thrown inside Thrower(), the CLR needs to execute the filter clauses to figure out which Catch wins. These filter executions are actually _function calls_. Since filter clauses don't have their own names, we just use the name of the function containing the filter clause for stack reporting purposes. Thus, the highlighted Main above is the execution of a filter clause located inside Main (in this case, "When Positive()"). When each filter clause completes, we "return" back to Thrower() to continue our search for the filter that returns True. Since this is how the call stack is built up, that's what DoStackSnapshot will report. +Function Positive() As Boolean + Return True +End Function + +Sub Thrower + Throw New Exception +End Sub + +Sub Main() + Try + Thrower() + Catch ex As Exception When Negative() + MsgBox("Negative") + Catch ex As Exception When Positive() + MsgBox("Positive") + End Try +End Sub +``` + +The filters are the things that come after "When". We all know that, when an exception is thrown, its type must match the type specified in a Catch clause in order for that Catch clause to be executed. "When" is a way to further restrict whether a Catch clause will be executed. Now, not only must the exception's type match, but also the When clause must evaluate to True for that Catch clause to be chosen. In the example above, when we run, we'll skip the first Catch clause (because its filter returned False), and execute the second, thus showing a message box with "Positive" in it. + +The thing you need to realize about DoStackSnapshot's behavior (indeed, CLR in general) is that the execution of a When clause is really a separate function call. In the above example, imagine we take a stack snapshot while inside Positive(). 
Our managed-only stack trace, as reported by DoStackSnapshot, would then look like this (stack grows up): + +Positive +Main +Thrower +Main + +It's that highlighted Main that seems odd at first. While the exception is thrown inside Thrower(), the CLR needs to execute the filter clauses to figure out which Catch wins. These filter executions are actually _function calls_. Since filter clauses don't have their own names, we just use the name of the function containing the filter clause for stack reporting purposes. Thus, the highlighted Main above is the execution of a filter clause located inside Main (in this case, "When Positive()"). When each filter clause completes, we "return" back to Thrower() to continue our search for the filter that returns True. Since this is how the call stack is built up, that's what DoStackSnapshot will report. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - HRESULTs.md b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - HRESULTs.md index 7d9952ff7545b..59d46109f365c 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - HRESULTs.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - HRESULTs.md @@ -5,14 +5,14 @@ Generally, corerror.h tells you all you need to know about what kinds of HRESULT ### E\_FAIL -I don't much like E\_FAIL. If DoStackSnapshot fails, you will typically see a more descriptive, custom HRESULT. However, there are regrettably a few ways DoStackSnapshot can fail where you'll see the dreaded E\_FAIL instead. From your code's point of view, you shouldn't assume E\_FAIL will always imply one of the cases below (or conversely that each of these cases will always result in E\_FAIL). But this is just good stuff to know as you develop and debug your profiler, so you don't get blindsided. - -1) No managed frames on stack - +I don't much like E\_FAIL. If DoStackSnapshot fails, you will typically see a more descriptive, custom HRESULT. However, there are regrettably a few ways DoStackSnapshot can fail where you'll see the dreaded E\_FAIL instead. From your code's point of view, you shouldn't assume E\_FAIL will always imply one of the cases below (or conversely that each of these cases will always result in E\_FAIL). But this is just good stuff to know as you develop and debug your profiler, so you don't get blindsided. + +1) No managed frames on stack + If you call DoStackSnapshot when there are no managed functions on your target thread's stack, you can get E\_FAIL. For example, if you try to walk the stack of a target thread very early on in its execution, there simply might not be any managed frames there yet. Or, if you try to walk the stack of the finalizer thread while it's waiting to do work, there will certainly be no managed frames on its stack. It's also possible that walking a stack with no managed frames on it will yield S\_OK instead of E\_FAIL (e.g., if the target thread is jit-compiling the first managed function to be called on that thread). Again, your code probably doesn't need to worry about all these cases. If we call your StackSnapshotCallback for a managed frame, you can trust that frame is there. If we don't call your StackSnapshotCallback, you can assume there are no managed frames on the stack. -2) OS kernel handling a hardware exception - +2) OS kernel handling a hardware exception + This one is less likely to happen, but it certainly can. 
When an app throws a hardware exception (e.g., divide by 0), the offending thread enters the Windows kernel. The kernel spends some time recording the thread's current user-mode register context, modifying some registers, and moving the instruction pointer to the user-mode exception dispatch routine. At this point the thread is ready to reenter user-mode. But if you are unlucky enough to call DoStackSnapshot while the target thread is still in the kernel doing this stuff, you will get E\_FAIL. 3) Detectably bad seed @@ -25,11 +25,11 @@ Generally, this HRESULT means that your profiler requested to abort the stack wa One of the beautiful things about running 64-bit Windows is that you can get the Windows OS to perform (native) stack walks for you. Read up on [RtlVirtualUnwind](http://msdn.microsoft.com/library/default.asp?url=/library/en-us/debug/base/rtlvirtualunwind.asp) if you're unfamiliar with this. The Windows OS has a critical section to protect a block of memory used to help perform this stack walk. So what would happen if: -- The OS's exception handling code causes a thread to walk its own stack -- The thread therefore enters this critical section -- Your profiler (via DoStackSnapshot) suspends this thread while the thread is still inside the critical section -- DoStackSnapshot uses RtlVirtualUnwind to help walk this suspended thread -- RtlVirtualUnwind (executing on the current thread) tries to enter the critical section (already owned by suspended target thread) +- The OS's exception handling code causes a thread to walk its own stack +- The thread therefore enters this critical section +- Your profiler (via DoStackSnapshot) suspends this thread while the thread is still inside the critical section +- DoStackSnapshot uses RtlVirtualUnwind to help walk this suspended thread +- RtlVirtualUnwind (executing on the current thread) tries to enter the critical section (already owned by suspended target thread) If your answer was "deadlock", congratulations! DoStackSnapshot has some code that tries to avoid this scenario, by aborting the stack walk before the deadlock can occur. When this happens, DoStackSnapshot will return CORPROF\_E\_STACKSNAPSHOT\_ABORTED. Note that this whole scenario is pretty rare, and only happens on WIN64. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md index af5d867ee4a4e..bcd246837a365 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md @@ -13,7 +13,7 @@ The CLR Profiling API allows you to hook managed functions so that your profiler [in] FunctionLeave \*pFuncLeave, [in] FunctionTailcall \*pFuncTailcall); ``` - + _(Profiler implements these…)_ ``` typedef void FunctionEnter(FunctionID funcID); @@ -30,7 +30,7 @@ The CLR Profiling API allows you to hook managed functions so that your profiler [in] FunctionLeave2 *pFuncLeave, [in] FunctionTailcall2 *pFuncTailcall); ``` - + _(Profiler implements these…)_ ``` @@ -66,7 +66,7 @@ _(Profiler calls this…)_ ``` HRESULT SetFunctionIDMapper([in] FunctionIDMapper \*pFunc); ``` - + _(Profiler implements this…)_ ``` @@ -74,7 +74,7 @@ typedef UINT_PTR __stdcall FunctionIDMapper( FunctionID funcId, BOOL *pbHookFunction); ``` - + 2. When FunctionIDMapper is called: a. 
Your profiler sets the pbHookFunction [out] parameter appropriately to determine whether the function identified by funcId should have ELT hooks compiled into it. @@ -92,7 +92,7 @@ The solution is “NGEN /Profile”. For example, if you run this command agains `ngen install MyAssembly.dll /Profile` - + it will NGEN MyAssembly.dll with the “Profile” flavor (also called “profiler-enhanced”). This flavor causes extra hooks to be baked in to enable features like ELT hooks, loader callbacks, managed/unmanaged code transition callbacks, and the JITCachedFunctionSearchStarted/Finished callbacks. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md index 3cca8c9a9c5c1..d47fffeb084da 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md @@ -79,7 +79,7 @@ typedef void FunctionTailcall2( COR_PRF_FRAME_INFO func); ``` -**Tip** : More than once I've seen profiler writers make the following mistake. They will take their naked assembly-language wrapper for their Enter2 and Leave2 hooks, and paste it again to use as the Tailcall2 assembly-language wrapper. The problem is they forget that the Tailcall2 hook takes a different number of parameters than the Enter2 / Leave2 hooks (or, more to the point, a different number of _bytes_ is passed on the stack to invoke the Tailcall2 hook). So, they'll take the "ret 16" at the end of their Enter2/Leave2 hook wrappers and stick that into their Tailcall2 hook wrapper, forgetting to change it to a "ret 12". Don't make the same mistake! +**Tip** : More than once I've seen profiler writers make the following mistake. They will take their naked assembly-language wrapper for their Enter2 and Leave2 hooks, and paste it again to use as the Tailcall2 assembly-language wrapper. The problem is they forget that the Tailcall2 hook takes a different number of parameters than the Enter2 / Leave2 hooks (or, more to the point, a different number of _bytes_ is passed on the stack to invoke the Tailcall2 hook). So, they'll take the "ret 16" at the end of their Enter2/Leave2 hook wrappers and stick that into their Tailcall2 hook wrapper, forgetting to change it to a "ret 12". Don't make the same mistake! It's worth noting what these parameters mean. With the Enter and Leave hooks it's pretty obvious that the parameters your hook is given (e.g., funcId) apply to the function being Entered or Left. But what about the Tailcall hook? Do the Tailcall hook's parameters describe the caller (function making the tail call) or the callee (function being tail called into)? @@ -95,7 +95,7 @@ Ok, enough dilly-dallying. What should your profiler do in its Tailcall hook? Tw The [CLRProfiler](http://www.microsoft.com/downloads/details.aspx?FamilyID=a362781c-3870-43be-8926-862b40aa0cd0&DisplayLang=en) is a great example of using Enter/Leave/Tailcall hooks to maintain shadow stacks. A shadow stack is your profiler's own copy of the current stack of function calls on a given thread at any given time. Upon Enter of a function, you push that FunctionID (and whatever other info interests you, such as arguments) onto your data structure that represents that thread's stack. Upon Leave of a function, you pop that FunctionID. This gives you a live list of managed calls in play on the thread. 
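As an illustration (my sketch, not the CLRProfiler's actual code), the Enter/Leave side of a shadow stack can be as simple as the following. The thread-local vector stands in for whatever per-thread structure a real profiler would use, and the naked assembly thunks that the CLR actually invokes (registered via SetEnterLeaveFunctionHooks2) are omitted:

```
#include <corprof.h>
#include <vector>

// Hypothetical per-thread shadow stack.
thread_local std::vector<FunctionID> g_shadowStack;

// C-level hook bodies reached from the assembly wrappers.
void OnEnter2(FunctionID funcId, UINT_PTR clientData,
              COR_PRF_FRAME_INFO func,
              COR_PRF_FUNCTION_ARGUMENT_INFO *argumentInfo)
{
    g_shadowStack.push_back(funcId);    // function entered: push
}

void OnLeave2(FunctionID funcId, UINT_PTR clientData,
              COR_PRF_FRAME_INFO func,
              COR_PRF_FUNCTION_ARGUMENT_RANGE *retvalRange)
{
    if (!g_shadowStack.empty())
        g_shadowStack.pop_back();       // function returned: pop
}

void OnTailcall2(FunctionID funcId, UINT_PTR clientData,
                 COR_PRF_FRAME_INFO func)
{
    // What belongs here is exactly the question the rest of this post
    // answers (spoiler: pop, just like Leave).
    if (!g_shadowStack.empty())
        g_shadowStack.pop_back();
}
```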
The CLRProfiler uses shadow stacks so that whenever the managed app being profiled chooses to allocate a new object, the CLRProfiler can know the managed call stack that led to the allocation. (Note that an alternate way of accomplishing this would be to call DoStackSnapshot at every allocation point instead of maintaining a shadow stack. Since objects are allocated so frequently, however, you'd end up calling DoStackSnapshot extremely frequently and will often see worse performance than if you had been maintaining shadow stacks in the first place.) - + OK, so when your profiler maintains a shadow stack, it's clear what your profiler should do on Enter or Leave, but what should it do on Tailcall? There are a couple ways one could imagine answering that question, but only one of them will work! Taking the example from the top of this post, imagine the stack looks like this: @@ -121,9 +121,9 @@ Method 2: On tailcall, "mark" the FunctionID at the top of your stack as needing With this strategy, for the duration of the call to Three(), the shadow stack will look like this: -Three -Helper (marked for deferred pop) -Main +Three +Helper (marked for deferred pop) +Main which some might consider more user-friendly. And as soon as Three() returns, your profiler will sneakily do a double-pop leaving just this: @@ -163,8 +163,8 @@ Method 2: Shadow stack fails At stage (4), the shadow stack looks like this: -Helper -Thread.Sleep (marked for "deferred pop") +Helper +Thread.Sleep (marked for "deferred pop") Main If you think it might be complicated to explain tail calls to your users so they can understand the Method 1 form of shadow stack presentation, just try explaining why it makes sense to present to them that Thread.Sleep() is calling Helper()! @@ -184,11 +184,11 @@ static public void Main() would yield: ``` -Helper -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") +Helper +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") Main ``` @@ -211,11 +211,11 @@ static public void Helper() would yield: ``` -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") -Thread.Sleep (marked for "deferred pop") -Helper +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") +Thread.Sleep (marked for "deferred pop") +Helper Main ``` @@ -337,7 +337,7 @@ ildasm Class1.exe Inside ildasm, use File.Dump to generate a text file that contains a textual representation of the IL from Class1.exe. Call it Class1WithTail.il. Open up that file and add the tail. prefix just before the call you want optimized into a tail call (see highlighted yellow for changes): ``` -.method private hidebysig static int32 +.method private hidebysig static int32 Helper(int32 i) cil managed { ~~// Code size 45 (0x2d) @@ -386,5 +386,5 @@ If you didn't learn anything, I hope you at least got some refreshing sleep than - Since some managed functions may tail call into native helper functions inside the CLR (for which you won't get an Enter hook notification), your Tailcall hook should treat the tail call as if it were a Leave, and not depend on the next Enter hook correlating to the target of the last tail call. 
With shadow stacks, for example, this means you should simply pop the calling function off your shadow stack in your Tailcall hook. - Since tail calls can be elusive to find in practice, it's well worth your while to use ildasm/ilasm to manufacture explicit tail calls so you can step through your Tailcall hook and test its logic. -_David has been a developer at Microsoft for over 70 years (allowing for his upcoming time-displacement correction). He joined Microsoft in 2079, first starting in the experimental time-travel group. His current assignment is to apply his knowledge of the future to eliminate the "Wait for V3" effect customers commonly experience in his source universe. By using Retroactive Hindsight-ellisenseTM his goal is to "get it right the first time, this time" in a variety of product groups._ +_David has been a developer at Microsoft for over 70 years (allowing for his upcoming time-displacement correction). He joined Microsoft in 2079, first starting in the experimental time-travel group. His current assignment is to apply his knowledge of the future to eliminate the "Wait for V3" effect customers commonly experience in his source universe. By using Retroactive Hindsight-ellisenseTM his goal is to "get it right the first time, this time" in a variety of product groups._ diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md index 149a0c0a7888c..d5ddb5dc777c8 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md @@ -7,7 +7,7 @@ If you’re writing a profiler that you expect to run against CLR 2.0 or greater Let's say a C# developer writes code like this: - + ``` class MyClass { @@ -57,7 +57,7 @@ HRESULT GetFunctionInfo2([in] FunctionID funcId, typeArgs[]: This is the array of **type arguments** to MyClass\.Foo\. So this will be an array of only one element: the ClassID for float. (The int in MyClass\ is a type argument to MyClass, not to Foo, and you would only see that when you call GetClassIDInfo2 with MyClass\.) -## +## ## GetClassIDInfo2 @@ -89,20 +89,20 @@ To understand why, it’s necessary to understand an internal optimization the C For now, the important point is that, once we’re inside JITted code that is shared across different generic instantiations, how can one know which instantiation is the actual one that caused the current invocation? Well, in many cases, the CLR may not have that data readily lying around. However, as a profiler, you can capture this information and pass it back to the CLR when it needs it. This is done through a COR\_PRF\_FRAME\_INFO. There are two ways your profiler can get a COR\_PRF\_FRAME\_INFO: -1. Via slow-path Enter/Leave/Tailcall probes -2. Via your DoStackSnapshot callback +1. Via slow-path Enter/Leave/Tailcall probes +2. Via your DoStackSnapshot callback I lied. #1 is really the only way for your profiler to get a COR\_PRF\_FRAME\_INFO. #2 may seem like a way—at least the profiling API suggests that the CLR gives your profiler a COR\_PRF\_FRAME\_INFO in the DSS callback—but unfortunately the COR\_PRF\_FRAME\_INFO you get there is pretty useless. 
I suspect the COR\_PRF\_FRAME\_INFO parameter was added to the signature of the profiler’s DSS callback function so that it could “light up” at some point in the future when we could work on finding out how to create a sufficiently helpful COR\_PRF\_FRAME\_INFO during stack walks. However, that day has not yet arrived. So if you want a COR\_PRF\_FRAME\_INFO, you’ll need to grab it—and use it from—your slow-path Enter/Leave/Tailcall probe. With a valid COR\_PRF\_FRAME\_INFO, GetFunctionInfo2 will give you helpful, specific ClassIDs in the typeArgs [out] array and pClassId [out] parameter. If the profiler passes NULL for COR\_PRF\_FRAME\_INFO, here’s what you can expect: -- If you’re using CLR V2, pClassId will point to NULL if the function sits on _any_ generic class (shared or not). In CLR V4 this got a little better, and you’ll generally only see pClassId point to NULL if the function sits on a “shared” generic class (instantiated with reference types). - - Note: If it’s impossible for the profiler to have a COR\_PRF\_FRAME\_INFO handy to pass to GetFunctionInfo2, and that results in a NULL \*pClassID, the profiler can always use the metadata interfaces to find the mdTypeDef token of the class on which the function resides for the purposes of pretty-printing the class name to the user. Of course, the profiler will not know the specific instantiating type arguments that were used on the class in that case. -- the typeArgs [out] array will contain the ClassID for **System.\_\_Canon** , rather than the actual instantiating type(s), if the function itself is generic and is instantiated with reference type argument(s). +- If you’re using CLR V2, pClassId will point to NULL if the function sits on _any_ generic class (shared or not). In CLR V4 this got a little better, and you’ll generally only see pClassId point to NULL if the function sits on a “shared” generic class (instantiated with reference types). + - Note: If it’s impossible for the profiler to have a COR\_PRF\_FRAME\_INFO handy to pass to GetFunctionInfo2, and that results in a NULL \*pClassID, the profiler can always use the metadata interfaces to find the mdTypeDef token of the class on which the function resides for the purposes of pretty-printing the class name to the user. Of course, the profiler will not know the specific instantiating type arguments that were used on the class in that case. +- the typeArgs [out] array will contain the ClassID for **System.\_\_Canon** , rather than the actual instantiating type(s), if the function itself is generic and is instantiated with reference type argument(s). It’s worth noting here that there is a bug in GetFunctionInfo2, in that the [out] pClassId you get for the class containing the function can be wrong with generic virtual functions. Take a look at [this forum post](http://social.msdn.microsoft.com/Forums/en-US/netfxtoolsdev/thread/ed6f972f-712a-48df-8cce-74f8951503fa/) for more information and a workaround. -## +## ## ClassIDs & FunctionIDs vs. Metadata Tokens @@ -120,14 +120,14 @@ If you got curious, and ran such a profiler under the debugger, you could use th If your profiler performs IL rewriting, it’s important to understand that it must NOT do instantiation-specific IL rewriting. Huh? Let’s take an example. Suppose you’re profiling code that uses MyClass\.Foo\ and MyClass\.Foo\. Your profiler will see two JITCompilationStarted callbacks, and will have two opportunities to rewrite the IL. 
Your profiler may call GetFunctionInfo2 on those two FunctionIDs and determine that they’re two different instantiations of the same generic function. You may then be tempted to make use of the fact that one is instantiated with float, and the other with long, and provide different IL for the two different JIT compilations. The problem with this is that the IL stored in metadata, as well as the IL provided to SetILFunctionBody, is always specified relative to the mdMethodDef. (Remember, SetILFunctionBody doesn’t take a FunctionID as input; it takes an mdMethodDef.) And it’s the profiler’s responsibility always to specify the same rewritten IL for any given mdMethodDef no matter how many times it’s JITted. And a given mdMethodDef can be JITted multiple times due to a number of reasons: -- Two threads simultaneously trying to call the same function for the first time (and thus both trying to JIT that function) -- Strange dependency chains involving class constructors (more on this in the MSDN [reference topic](http://msdn.microsoft.com/en-us/library/ms230586.aspx)) -- Multiple AppDomains using the same (non-domain-neutral) function -- And of course multiple generic instantiations! +- Two threads simultaneously trying to call the same function for the first time (and thus both trying to JIT that function) +- Strange dependency chains involving class constructors (more on this in the MSDN [reference topic](http://msdn.microsoft.com/en-us/library/ms230586.aspx)) +- Multiple AppDomains using the same (non-domain-neutral) function +- And of course multiple generic instantiations! Regardless of the reason, the profiler must always rewrite with exactly the same IL. Otherwise, an invariant in the CLR will have been broken by the profiler, and you will get strange, undefined behavior as a result. And no one wants that. - + That’s it! Hopefully this gives you a good idea of how the CLR Profiling API will behave in the face of generic classes and functions, and what is expected of your profiler. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md b/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md index 1b1e2f74d7a3c..fded71588a335 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md @@ -31,16 +31,16 @@ Yes, that is a good example. You are an astute reader. Memory profilers that w # Going from metadata token to run-time ID -# +# -# +# As I mentioned above, the safest way to do this is to build up your own map and do reverse-lookups as necessary. If that scheme meets your needs, then by all means do that, and stop reading! But in the cases where this is insufficient, you may need to resort to using GetFunctionFromToken(AndTypeArgs) and GetClassFromToken(AndTypeArgs). There is no simple, foolproof way to use these APIs safely, but here is your guideline: **Never call GetFunctionFromToken(AndTypeArgs) and GetClassFromToken(AndTypeArgs) unless you’re certain the relevant types have been loaded.** (“Relevant types” include the ClassID containing the FunctionID whose mdMethodDef you pass to GetFunctionFromToken(AndTypeArgs), and the ClassID whose mdTypeDef you pass to GetClassFromToken(AndTypeArgs).) If these types have not been loaded, _you may cause them to be loaded now_! This is bad because: -- This is an easy way to crash the app. 
Trying to load a type at the wrong time could cause cycles, causing infinite loops (depending on what your profiler does in response to class load notifications) or outright crashes. For example, trying to load a type while its containing assembly is still in an early phase of loading is a great and fun way to crash the CLR. -- You will impact the behavior of the app. If you’re lucky enough not to crash the app, you’ve still impacted its behavior, by causing types to get loaded in a different order than they normally would. Any impact to app behavior like this makes it difficult for your users to reproduce problems that they are trying to use your tool to diagnose, or may hide problems that they don’t discover until they run their application outside of your tool. +- This is an easy way to crash the app. Trying to load a type at the wrong time could cause cycles, causing infinite loops (depending on what your profiler does in response to class load notifications) or outright crashes. For example, trying to load a type while its containing assembly is still in an early phase of loading is a great and fun way to crash the CLR. +- You will impact the behavior of the app. If you’re lucky enough not to crash the app, you’ve still impacted its behavior, by causing types to get loaded in a different order than they normally would. Any impact to app behavior like this makes it difficult for your users to reproduce problems that they are trying to use your tool to diagnose, or may hide problems that they don’t discover until they run their application outside of your tool. ## Determining whether a class was loaded @@ -54,14 +54,14 @@ MyRetType MyClass::MyFunction(MyArgumentType myArgumentType) then you can be reasonably assured that the following are loaded: -- MyClass -- MyArgumentType (if it’s a value-type) -- MyRetType (if it’s a value-type) -- For any class you know is loaded, so should be: - - its base class - - its value-type fields (not necessarily reference-type fields!) - - implemented interfaces - - value-type generic type arguments (and even reference-type generic type arguments in the case of MyClass) +- MyClass +- MyArgumentType (if it’s a value-type) +- MyRetType (if it’s a value-type) +- For any class you know is loaded, so should be: + - its base class + - its value-type fields (not necessarily reference-type fields!) + - implemented interfaces + - value-type generic type arguments (and even reference-type generic type arguments in the case of MyClass) So much for stacks. What if you encounter an instance of a class on the heap? Surely the class is loaded then, right? Well, probably. If you encounter an object on the heap just after GC (inside **GarbageCollectionFinished** , before you return), it should be safe to inspect the class’s layout, and then peek through ObjectIDs to see the values of their fields. @@ -73,7 +73,7 @@ In general, a lot of the uncertainty above comes from types stored in NGENd modu Now is a good time to remind you that, not only is it dangerous to inspect run-time IDs too early (i.e., before they load); it’s also dangerous to inspect run-time IDs too late (i.e., after they **unload** ). For example, if you store ClassIDs and FunctionIDs for later use, and use them “too late”, you can easily crash the CLR. The profiling API does pretty much no validation of anything (in many cases, it’s incapable of doing so without using up significant amounts of memory to maintain lookup tables for everything).
So we generally take any run-time ID that you pass to ICorProfilerInfo\* methods, cast it to an internal CLR structure ptr, and go boom if the ID is bad. -There is no way to just ask the CLR if a FunctionID or ClassID is valid. Indeed, classes could get unloaded, and new classes loaded, and your ClassID may now refer to a totally different (valid) class. +There is no way to just ask the CLR if a FunctionID or ClassID is valid. Indeed, classes could get unloaded, and new classes loaded, and your ClassID may now refer to a totally different (valid) class. You need to keep track of the unloads yourself. You are notified when run-time IDs go out of scope (today, this happens at the level of an AppDomain unloading or a collectible assembly unloading—in both cases all IDs “contained” in the unloading thing are now invalid). Once a run-time ID is out of scope, you are not allowed to pass that run-time ID back to the CLR. In fact, you should consider whether thread synchronization will be necessary in your profiler to maintain this invariant. For example, if a run-time ID gets unloaded on thread A, you’re still not allowed to pass that run-time ID back to the CLR on thread B. So you may need to block on a critical section in thread A during the \*UnloadStarted / AppDomainShutdown\* callbacks, to prevent them from returning to the CLR until any uses of the contained IDs in thread B are finished. @@ -91,16 +91,16 @@ ResolveTypeRef doesn’t know about any of this—it was never designed to be us If you absolutely need to resolve refs to defs, your best bet may be to use your own algorithm which will be as accurate as you can make it, under the circumstances, and which will never try to locate a module that hasn’t been loaded yet. That means that you shouldn’t try to resolve a ref to a def if that def hasn’t actually been loaded into a type by the CLR. Consider using an algorithm similar to the following: -1. Get the AssemblyRef from the TypeRef to get to the name, public key token and version of the assembly where the type should reside. -2. Enumerate all loaded modules that the Profiling API has notified you of (or via [EnumModules](http://msdn.microsoft.com/en-us/library/dd490890)) (you can filter out a specific AppDomain at this point if you want). -3. In each enumerated module, search for a TypeDef with the same name and namespace as the TypeRef (IMetaDataImport::FindTypeDefByName) -4. Pay attention to **type forwarding**! Once you find the TypeDef, it may actually be an “exported” type, in which case you will need to follow the trail to the next module. Read toward the bottom of [this post](Type Forwarding.md) for more info. +1. Get the AssemblyRef from the TypeRef to get to the name, public key token and version of the assembly where the type should reside. +2. Enumerate all loaded modules that the Profiling API has notified you of (or via [EnumModules](http://msdn.microsoft.com/en-us/library/dd490890)) (you can filter out a specific AppDomain at this point if you want). +3. In each enumerated module, search for a TypeDef with the same name and namespace as the TypeRef (IMetaDataImport::FindTypeDefByName) +4. Pay attention to **type forwarding**! Once you find the TypeDef, it may actually be an “exported” type, in which case you will need to follow the trail to the next module. Read toward the bottom of [this post](Type Forwarding.md) for more info. 
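For illustration only, here is my hypothetical sketch of steps 2 and 3 of the numbered algorithm above; it carries the same untested/unsupported caveat, and it ignores step 1's assembly-identity matching and step 4's type forwarding, both of which a real implementation needs:

```
#include <cor.h>
#include <corprof.h>
#include <vector>

// Hypothetical helper: search the modules the profiler already knows about
// (from ModuleLoadFinished / EnumModules) for a TypeDef matching the
// TypeRef's namespace-qualified name.
HRESULT FindTypeDefInLoadedModules(
    ICorProfilerInfo3 *pInfo,
    const std::vector<ModuleID> &knownModules,  // profiler-maintained cache
    LPCWSTR szTypeName,                         // e.g. L"MyNamespace.MyType"
    ModuleID *pFoundModule,
    mdTypeDef *pFoundTypeDef)
{
    for (ModuleID moduleId : knownModules)
    {
        IMetaDataImport *pImport = NULL;
        if (FAILED(pInfo->GetModuleMetaData(moduleId, ofRead,
                                            IID_IMetaDataImport,
                                            (IUnknown **)&pImport)))
            continue;   // e.g. a resource-only module

        mdTypeDef td;
        HRESULT hr = pImport->FindTypeDefByName(szTypeName, mdTokenNil, &td);
        pImport->Release();

        if (SUCCEEDED(hr))
        {
            *pFoundModule = moduleId;
            *pFoundTypeDef = td;
            return S_OK;   // caller must still check for exported types
        }
    }
    return E_FAIL;
}
```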
The above can be a little bit smarter by paying attention to what order you choose to search through the modules: -- First search for the TypeDef in assemblies which exactly match the name, public key token and version for the AssemblyRef. -- If that fails, then search through assemblies matching name and public key token (where the version is higher than the one supplied – this can happen for Framework assemblies). -- If that fails, then search through all the other assemblies +- First search for the TypeDef in assemblies which exactly match the name, public key token and version for the AssemblyRef. +- If that fails, then search through assemblies matching name and public key token (where the version is higher than the one supplied – this can happen for Framework assemblies). +- If that fails, then search through all the other assemblies I must warn you that the above scheme is **not tested and not supported. Use at your own risk!** @@ -108,7 +108,7 @@ I must warn you that the above scheme is **not tested and not supported. Use at Although I cannot comment on what will or will not be in any particular future version of the CLR, I can tell you that it is clear to us on the CLR team that we have work to do, to make dealing with metadata tokens and their corresponding run-time type information easier from the profiling API. After all, it doesn’t take a rocket scientist to read the above and conclude that it does take a rocket scientist to actually follow all this advice. So for now, enjoy the fact that what you do is really hard, making you difficult to replace, and thus your job all the more secure. You’re welcome. - + Special thanks to David Wrighton and Karel Zikmund, who have helped considerably with all content in this entry around the type system and metadata. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler Detach.md b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler Detach.md index 987ee24507b4a..4b87a0018c442 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler Detach.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler Detach.md @@ -9,9 +9,9 @@ The Detach feature allows a profiler that the user is finished with to be unload Not every V4 profiler is allowed to detach from a running process. The general rule is that a profiler which has caused an irreversible impact in the process it’s profiling should _not_ attempt to detach. The CLR catches the following cases: -- Profiler set immutable flags (COR\_PRF\_MONITOR\_IMMUTABLE) via SetEventMask. -- Profiler performed IL rewriting via SetILFunctionBody -- Profiler used the Enter/Leave/Tailcall methods to add callouts to its probes +- Profiler set immutable flags (COR\_PRF\_MONITOR\_IMMUTABLE) via SetEventMask. +- Profiler performed IL rewriting via SetILFunctionBody +- Profiler used the Enter/Leave/Tailcall methods to add callouts to its probes If the profiler attempts to detach after doing any of the above, the CLR will disallow the attempt (see below for details). @@ -25,20 +25,20 @@ There’s one, deceptively simple-looking method the profiler calls to detach it So, the sequence works like this: -1. The profiler **deactivates all the ways control could enter the profiler** (aside from the CLR Profiling API itself). This means removing any Windows callbacks, timer interrupts, hijacking, disabling any other components that may try to call into the profiler DLL, etc. 
The profiler must also wait for all threads that it has created (e.g., a sampling thread, inter-process communication threads, a ForceGC thread, etc.) to exit, except for the one thread the profiler will use to call RequestProfilerDetach(). Any threads created by the CLR, of course, should not be tampered with. - - Your profiler must block here until all those ways control can enter your profiler DLL have truly been deactivated (e.g., just setting a flag to disable sampling may not be enough if your sampling thread is currently performing a sample already in progress). You must coordinate with all components of your profiler so that your profiler DLL knows that everything is verifiably deactivated, and all profiler-created threads have exited (except for the one thread the profiler will use to call RequestProfilerDetach()). -2. If the profiler will use a thread of its own creation to call RequestProfilerDetach() (which is the typical way this API will be called), that thread must own a reference onto the profiler’s DLL, via its own **LoadLibrary()** call that it makes on the profiler DLL. This can either be done when the thread starts up, or now, or sometime in between. But that reference must be added at some point before calling RequestProfilerDetach(). -3. Profiler calls ICorProfilerInfo3:: **RequestProfilerDetach** (). - - (A) This causes the CLR to (synchronously) set internal state to avoid making any further calls into the profiler via the ICorProfilerCallback\* interfaces, and to refuse any calls from the profiler into ICorProfilerInfo\* interfaces (such calls will now fail early with CORPROF\_E\_PROFILER\_DETACHING). - - (B) The CLR also (asynchronously) begins a period safety check on another thread to determine when all pre-existing calls into the profiler via the ICorProfilerCallback\* interfaces have returned. - - Note: It is expected that your profiler will not make any more “unsolicited” calls back into the CLR via any interfaces (ICorProfilerInfo\*, hosting, metahost, metadata, etc.). By “unsolicited”, I’m referring to calls that didn’t originate from the CLR via ICorProfilerCallback\*. In other words, it’s ok for the profiler to continue to do its usual stuff in its implementation of ICorProfilerCallback methods (which may include calling into the CLR via ICorProfilerInfo\*), as the CLR will wait for those outer ICorProfilerCallback methods to return as per 3B. But the profiler must not make any other calls into the CLR (i.e., that are not sandwiched inside an ICorProfilerCallback call). You should already have deactivated any component of your profiler that would make such unsolicited calls in step 1. -4. Assuming the above RequestProfilerDetach call was made on a profiler-created thread, that thread must now call [**FreeLibraryAndExitThread**](http://msdn.microsoft.com/en-us/library/ms683153(VS.85).aspx)**()**. (Note: that’s a specialized Windows API that combines FreeLibrary() and ExitThread() in such a way that races can be avoided—do not call FreeLibrary() and ExitThread() separately.) -5. On another thread, the CLR continues its **period safety checks** from 3B above. Eventually the CLR determines that there are no more ICorProfilerCallback\* interface calls currently executing, and it is therefore safe to unload the profiler. -6. The CLR calls ICorProfilerCallback3:: **ProfilerDetachSucceeded**. The profiler can use this signal to know that it’s about to be unloaded. 
It’s expected that the profiler will do very little in this callback—probably just notifying the user that the profiler is about to be unloaded. Any cleanup the profiler needs to do should already have been done during step 1. -7. CLR makes the necessary number of **Release** () calls on ICorProfilerCallback3. The reference count should go down to 0 at this point, and the profiler may deallocate any memory it had previously allocated to support its callback implementation. -8. CLR calls **FreeLibrary** () on the profiler DLL. This should be the last reference to the profiler’s DLL, and your DLL will now be unloaded. - - Note: in some cases, it’s theoretically possible that step 4 doesn’t happen until _after_ this step, in which case the last reference to the profiler’s DLL will actually be released by your profiler’s thread that called RequestProfilerDetach and then FreeLibraryAndExitThread. That’s because steps 1-4 happen on your profiler’s thread, and steps 5-8 happen on a dedicated CLR thread (for detaching profilers) sometime after step 3 is completed. So there’s a race between step 4 and all of steps 5-8. There’s no harm in this, so long as you’re playing nice by doing your own LoadLibrary and FreeLibraryAndExitThread as described above. -9. The CLR adds an Informational entry to the Application Event Log noting that the profiler has been unloaded. The CLR is now ready to service any profiler attach requests. +1. The profiler **deactivates all the ways control could enter the profiler** (aside from the CLR Profiling API itself). This means removing any Windows callbacks, timer interrupts, hijacking, disabling any other components that may try to call into the profiler DLL, etc. The profiler must also wait for all threads that it has created (e.g., a sampling thread, inter-process communication threads, a ForceGC thread, etc.) to exit, except for the one thread the profiler will use to call RequestProfilerDetach(). Any threads created by the CLR, of course, should not be tampered with. + - Your profiler must block here until all those ways control can enter your profiler DLL have truly been deactivated (e.g., just setting a flag to disable sampling may not be enough if your sampling thread is currently performing a sample already in progress). You must coordinate with all components of your profiler so that your profiler DLL knows that everything is verifiably deactivated, and all profiler-created threads have exited (except for the one thread the profiler will use to call RequestProfilerDetach()). +2. If the profiler will use a thread of its own creation to call RequestProfilerDetach() (which is the typical way this API will be called), that thread must own a reference onto the profiler’s DLL, via its own **LoadLibrary()** call that it makes on the profiler DLL. This can either be done when the thread starts up, or now, or sometime in between. But that reference must be added at some point before calling RequestProfilerDetach(). +3. Profiler calls ICorProfilerInfo3:: **RequestProfilerDetach** (). + - (A) This causes the CLR to (synchronously) set internal state to avoid making any further calls into the profiler via the ICorProfilerCallback\* interfaces, and to refuse any calls from the profiler into ICorProfilerInfo\* interfaces (such calls will now fail early with CORPROF\_E\_PROFILER\_DETACHING). 
+ - (B) The CLR also (asynchronously) begins a periodic safety check on another thread to determine when all pre-existing calls into the profiler via the ICorProfilerCallback\* interfaces have returned.
+ - Note: It is expected that your profiler will not make any more “unsolicited” calls back into the CLR via any interfaces (ICorProfilerInfo\*, hosting, metahost, metadata, etc.). By “unsolicited”, I’m referring to calls that didn’t originate from the CLR via ICorProfilerCallback\*. In other words, it’s ok for the profiler to continue to do its usual stuff in its implementation of ICorProfilerCallback methods (which may include calling into the CLR via ICorProfilerInfo\*), as the CLR will wait for those outer ICorProfilerCallback methods to return as per 3B. But the profiler must not make any other calls into the CLR (i.e., that are not sandwiched inside an ICorProfilerCallback call). You should already have deactivated any component of your profiler that would make such unsolicited calls in step 1.
+4. Assuming the above RequestProfilerDetach call was made on a profiler-created thread, that thread must now call [**FreeLibraryAndExitThread**](http://msdn.microsoft.com/en-us/library/ms683153(VS.85).aspx)**()**. (Note: that’s a specialized Windows API that combines FreeLibrary() and ExitThread() in such a way that races can be avoided—do not call FreeLibrary() and ExitThread() separately.)
+5. On another thread, the CLR continues its **periodic safety checks** from 3B above. Eventually the CLR determines that there are no more ICorProfilerCallback\* interface calls currently executing, and it is therefore safe to unload the profiler.
+6. The CLR calls ICorProfilerCallback3:: **ProfilerDetachSucceeded**. The profiler can use this signal to know that it’s about to be unloaded. It’s expected that the profiler will do very little in this callback—probably just notifying the user that the profiler is about to be unloaded. Any cleanup the profiler needs to do should already have been done during step 1.
+7. CLR makes the necessary number of **Release** () calls on ICorProfilerCallback3. The reference count should go down to 0 at this point, and the profiler may deallocate any memory it had previously allocated to support its callback implementation.
+8. CLR calls **FreeLibrary** () on the profiler DLL. This should be the last reference to the profiler’s DLL, and your DLL will now be unloaded.
+ - Note: in some cases, it’s theoretically possible that step 4 doesn’t happen until _after_ this step, in which case the last reference to the profiler’s DLL will actually be released by your profiler’s thread that called RequestProfilerDetach and then FreeLibraryAndExitThread. That’s because steps 1-4 happen on your profiler’s thread, and steps 5-8 happen on a dedicated CLR thread (for detaching profilers) sometime after step 3 is completed. So there’s a race between step 4 and all of steps 5-8. There’s no harm in this, so long as you’re playing nice by doing your own LoadLibrary and FreeLibraryAndExitThread as described above.
+9. The CLR adds an Informational entry to the Application Event Log noting that the profiler has been unloaded. The CLR is now ready to service any profiler attach requests.
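
To make the choreography above concrete, here is a minimal sketch (an editor's illustration, not code from the original post) of a profiler-created detach thread covering steps 1-4. It assumes the profiler stashed its `ICorProfilerInfo3*` in a global `g_pProfilerInfo` during Initialize(), that the profiler DLL is named `MyProfiler.dll` (hypothetical), and that `DeactivateAllProbes()` / `WaitForProfilerThreadsToExit()` are hypothetical helpers implementing step 1 for this particular profiler:

```
// Sketch only: runs on a thread the profiler created just for detaching.
#include <windows.h>
#include <corprof.h>

extern ICorProfilerInfo3* g_pProfilerInfo;  // saved during Initialize()
void DeactivateAllProbes();                 // hypothetical: step 1
void WaitForProfilerThreadsToExit();        // hypothetical: step 1

DWORD WINAPI DetachThreadProc(LPVOID)
{
    // Step 2: this thread takes its own reference on the profiler DLL.
    HMODULE hSelf = ::LoadLibraryW(L"MyProfiler.dll");  // hypothetical DLL name

    // Step 1: nothing may be able to call into the profiler DLL anymore.
    DeactivateAllProbes();
    WaitForProfilerThreadsToExit();

    // Step 3: kick off the CLR's periodic safety checks.
    HRESULT hr = g_pProfilerInfo->RequestProfilerDetach(
        5000 /* dwExpectedCompletionMilliseconds */);
    // If this fails (e.g., the profiler set immutable flags or rewrote IL
    // and so is not detachable), the profiler stays loaded and could
    // reactivate itself here instead.

    // Step 4: release our DLL reference and exit this thread atomically.
    // (FreeLibraryAndExitThread does not return.)
    ::FreeLibraryAndExitThread(hSelf, SUCCEEDED(hr) ? 0 : 1);
}
```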
## RequestProfilerDetach @@ -46,17 +46,17 @@ Let’s dive a little deeper into the method you call to detach your profiler: `HRESULT RequestProfilerDetach([in] DWORD dwExpectedCompletionMilliseconds);` - + First off, you’ll notice this is on ICorProfilerInfo3, the interface your profiler DLL uses, in the same process as your profilee. Although the AttachProfiler API is called from outside the process, this detach method is called from in-process. Why? Well, the general rule with profilers is that _everything_ is done in-process. Attach is an exception because your profiler isn’t in the process yet. You need to somehow trigger your profiler to load, and you can’t do that from a process in which you have no code executing yet! So Attach is sort of a boot-strapping API that has to be called from a process of your own making. Once your profiler DLL is up and running, it is in charge of everything, from within the same process as the profilee. And detach is no exception. Now with that said, it’s probably typical that your profiler will detach in response to an end user action—probably via some GUI that you ship that runs in its own process. So a case could be made that the CLR team could have made your life easier by providing an out-of-process way to do a detach, so that your GUI could easily trigger a detach, just as it triggered the attach. However, you could make that same argument about all the ways you might want to control a profiler via a GUI, such as these commands: -- Do a GC now and show me the heap -- Dial up or down the sampling frequency -- Change which instrumented methods should log their invocations -- Start / stop monitoring exceptions -- etc. +- Do a GC now and show me the heap +- Dial up or down the sampling frequency +- Change which instrumented methods should log their invocations +- Start / stop monitoring exceptions +- etc. The point is, if you have a GUI to control your profiler, then you probably already have an inter-process mechanism for the GUI to communicate with your profiler DLL. So think of “detach” as yet one more command your GUI will send to your profiler DLL. @@ -66,10 +66,10 @@ The CLR uses that value in its Sleep() statement that sits between each periodic Until the profiler can be unloaded, it will be considered “loaded” (though deactivated in the sense that no new callback methods will be called). This prevents any new profiler from attaching. - + Ok, that wraps up how detaching works. If you remember only one thing from this post, remember that it’s really easy to cause an application you profile to AV after your profiler unloads if you’re not careful. While the CLR tracks outgoing ICorProfilerCallback\* calls, it does not track any other way that control can enter your profiler DLL. 
_Before_ your profiler calls RequestProfilerDetach: -- You must take care to deactivate all other ways control can enter your profiler DLL -- Your profiler must block until all those other ways control can enter your profiler DLL have verifiably been deactivated +- You must take care to deactivate all other ways control can enter your profiler DLL +- Your profiler must block until all those other ways control can enter your profiler DLL have verifiably been deactivated diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md index 682ad7bf583ee..21ea9dcf77009 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md @@ -17,31 +17,31 @@ It’s nice to be able to get call stacks whenever you want them. But with powe So let’s take a look at the beast. Here’s what your profiler calls (you can find this in ICorProfilerInfo2, in corprof.idl): ``` -HRESULT DoStackSnapshot( - [in] ThreadID thread, - [in] StackSnapshotCallback *callback, - [in] ULONG32 infoFlags, - [in] void *clientData, - [in, size_is(contextSize), length_is(contextSize)] BYTE context[], +HRESULT DoStackSnapshot( + [in] ThreadID thread, + [in] StackSnapshotCallback *callback, + [in] ULONG32 infoFlags, + [in] void *clientData, + [in, size_is(contextSize), length_is(contextSize)] BYTE context[], [in] ULONG32 contextSize); ``` And here’s what the CLR calls on your profiler (you can also find this in corprof.idl). You’ll pass a pointer to your implementation of this function in the callback parameter above. ``` -typedef HRESULT \_\_stdcall StackSnapshotCallback( - FunctionID funcId, - UINT_PTR ip, - COR_PRF_FRAME_INFO frameInfo, - ULONG32 contextSize, - BYTE context[], +typedef HRESULT \_\_stdcall StackSnapshotCallback( + FunctionID funcId, + UINT_PTR ip, + COR_PRF_FRAME_INFO frameInfo, + ULONG32 contextSize, + BYTE context[], void *clientData); ``` It’s like a sandwich. When your profiler wants to walk the stack, you call DoStackSnapshot. Before the CLR returns from that call, it calls your StackSnapshotCallback several times, once for each managed frame (or run of unmanaged frames) on the stack: ``` -Profiler calls DoStackSnapshot. Whole wheat bread - CLR calls StackSnapshotCallback. Lettuce frame (“leaf”-most frame, ha) - CLR calls StackSnapshotCallback. Tomato frame - CLR calls StackSnapshotCallback. Bacon frame (root or “main” frame) +Profiler calls DoStackSnapshot. Whole wheat bread + CLR calls StackSnapshotCallback. Lettuce frame (“leaf”-most frame, ha) + CLR calls StackSnapshotCallback. Tomato frame + CLR calls StackSnapshotCallback. Bacon frame (root or “main” frame) CLR returns back to profiler from DoStackSnapshot Whole wheat bread ``` @@ -77,42 +77,42 @@ Before I continue from this exciting cliffhanger, a brief interlude. Everyone k Now that we’re speaking the same language. Let’s look at a mixed-mode stack: -| +| Unmanaged | -| +| D (Managed) | -| +| Unmanaged | -| +| C (Managed) | -| +| B (Managed) | -| +| Unmanaged | -| +| A (Managed) | -| +| Main (Managed) @@ -120,7 +120,7 @@ Main (Managed) Stepping back a bit, it’s worthwhile to understand why DoStackSnapshot exists in the first place. It’s there to help you walk _managed_ frames on the stack. 
If you tried to walk managed frames yourself, you would get unreliable results, particularly on 32 bits, because of some wacky calling conventions used in managed code. The CLR understands these calling conventions, and DoStackSnapshot is therefore in a uniquely suitable position to help you decode them. However, DoStackSnapshot is not a complete solution if you want to be able to walk the entire stack, including unmanaged frames. Here’s where you have a choice: -1. Do nothing and report stacks with “unmanaged holes” to your users, or +1. Do nothing and report stacks with “unmanaged holes” to your users, or 2. Write your own unmanaged stack walker to fill in those holes. When DoStackSnapshot comes across a block of unmanaged frames, it calls your StackSnapshotCallback with funcId=0. (I think I mentioned this before, but I’m not sure you were listening.) If you’re going with option #1 above, simply do nothing in your callback when funcId=0. We’ll call you again for the next managed frame and you can wake up at that point. @@ -145,77 +145,77 @@ But before you get too deep, note that the issue of whether and how to seed a st For the truly adventurous profiler that is doing an asynchronous, cross-thread, seeded stack walk while filling in the unmanaged holes, here’s what it would look like. -| +| -Block of -Unmanaged +Block of +Unmanaged Frames - | -1. You suspend the target thread (target thread’s suspend count is now 1) -2. You get the target thread’s current register context -3. You determine if the register context points to unmanaged code (e.g., call ICorProfilerInfo2::GetFunctionFromIP(), and see if you get back a 0 FunctionID) + | +1. You suspend the target thread (target thread’s suspend count is now 1) +2. You get the target thread’s current register context +3. You determine if the register context points to unmanaged code (e.g., call ICorProfilerInfo2::GetFunctionFromIP(), and see if you get back a 0 FunctionID) 4. In this case the register context does point to unmanaged code, so you perform an unmanaged stack walk until you find the top-most managed frame (D) | -| +| -Function D +Function D (Managed) - | + | 1. You call DoStackSnapshot with your seed context. CLR suspends target thread again: its suspend count is now 2. Our sandwich begins. 1. CLR calls your StackSnapshotCallback with FunctionID for D. | -| +| -Block of -Unmanaged +Block of +Unmanaged Frames - | + | 1. CLR calls your StackSnapshotCallback with FunctionID=0. You’ll need to walk this block yourself. You can stop when you hit the first managed frame, or you can cheat: delay your unmanaged walk until sometime after your next callback, as the next callback will tell you exactly where the next managed frame begins (and thus where your unmanaged walk should end). | -| +| -Function C +Function C (Managed) - | + | 1. CLR calls your StackSnapshotCallback with FunctionID for C. | -| +| -Function B +Function B (Managed) - | + | 1. CLR calls your StackSnapshotCallback with FunctionID for B. | -| +| -Block of -Unmanaged +Block of +Unmanaged Frames - | + | 1. CLR calls your StackSnapshotCallback with FunctionID=0. Again, you’ll need to walk this block yourself. | -| +| -Function A +Function A (Managed) - | + | 1. CLR calls your StackSnapshotCallback with FunctionID for A. | -| +| -Main +Main (Managed) - | -1. CLR calls your StackSnapshotCallback with FunctionID for Main. + | +1. CLR calls your StackSnapshotCallback with FunctionID for Main. 2. DoStackSnapshot “resumes” target thread (its suspend count is now 1) and returns. 
Our sandwich is complete. 1. You resume target thread (its suspend count is now 0, so it’s resumed for real). @@ -253,8 +253,8 @@ Problem 2: _While you suspend the target thread, the target thread tries to susp “Come on! Like that could really happen.” Believe it or not, if: -- Your app runs on a multiproc box, and -- Thread A runs on one proc and thread B runs on another, and +- Your app runs on a multiproc box, and +- Thread A runs on one proc and thread B runs on another, and - A tries to suspend B while B tries to suspend A then it’s possible that both suspensions win, and both threads end up suspended. It’s like the line from that movie: “Multiproc means never having to say, ‘I lose.’”. Since each thread is waiting for the other to wake it up, they stay suspended forever. It is the most romantic of all deadlocks. @@ -265,7 +265,7 @@ Ok, so, why is the target thread trying to suspend you anyway? Well, in a hypot A less obvious reason that the target thread might try to suspend your walking thread is due to the inner workings of the CLR. The CLR suspends application threads to help with things like garbage collection. So if your walker tries to walk (and thus suspend) the thread doing the GC at the same time the thread doing the GC tries to suspend your walker, you are hosed. -The way out, fortunately, is quite simple. The CLR is only going to suspend threads it needs to suspend in order to do its work. Let’s label the two threads involved in your stack walk: Thread A = the current thread (the thread performing the walk), and Thread B = the target thread (the thread whose stack is walked). As long as Thread A has _never executed managed code_ (and is therefore of no use to the CLR during a garbage collection), then the CLR will never try to suspend Thread A. This means it’s safe for your profiler to have Thread A suspend Thread B, as the CLR will have no reason for B to suspend A. +The way out, fortunately, is quite simple. The CLR is only going to suspend threads it needs to suspend in order to do its work. Let’s label the two threads involved in your stack walk: Thread A = the current thread (the thread performing the walk), and Thread B = the target thread (the thread whose stack is walked). As long as Thread A has _never executed managed code_ (and is therefore of no use to the CLR during a garbage collection), then the CLR will never try to suspend Thread A. This means it’s safe for your profiler to have Thread A suspend Thread B, as the CLR will have no reason for B to suspend A. If you’re writing a sampling profiler, it’s quite natural to ensure all of this. You will typically have a separate thread of your own creation that responds to timer interrupts and walks the stacks of other threads. Call this your sampler thread. Since you create this sampler thread yourself and have control over what it executes, the CLR will have no reason to suspend it. And this also fixes the “poorly-written profiler” example above, since this sampler thread is the only thread of your profiler trying to walk or suspend other threads. So your profiler will never try to directly suspend the sampler thread. 
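
Tying these rules together, here is a bare-bones sketch (an editor's illustration for x64, not code from the original post) of a seeded, asynchronous walk performed from a sampler thread that has never run managed code. It assumes `g_pProfilerInfo` holds an `ICorProfilerInfo2*` saved at startup, that `hThread` / `threadId` are the OS handle and CLR ThreadID of the same target thread, and it elides the native unwind that produces the seed:

```
// Sketch only: error handling and the unmanaged unwinder are omitted.
#include <windows.h>
#include <corprof.h>

extern ICorProfilerInfo2* g_pProfilerInfo;  // saved during Initialize()

HRESULT __stdcall OnSnapshot(FunctionID funcId, UINT_PTR ip,
    COR_PRF_FRAME_INFO frameInfo, ULONG32 cbContext, BYTE context[],
    void* clientData)
{
    if (funcId == 0)
    {
        // A run of unmanaged frames: either ignore it (option #1) or walk
        // it yourself to fill in the hole (option #2).
    }
    else
    {
        // A managed frame: record (funcId, ip) in the sample buffer.
    }
    return S_OK;  // S_OK keeps the walk going
}

void SampleOneThread(HANDLE hThread, ThreadID threadId)
{
    if (::SuspendThread(hThread) == (DWORD)-1)
        return;

    CONTEXT ctx = {};
    ctx.ContextFlags = CONTEXT_FULL;
    if (::GetThreadContext(hThread, &ctx))
    {
        BYTE* pSeed = nullptr;
        ULONG32 cbSeed = 0;
        FunctionID funcId = 0;
        g_pProfilerInfo->GetFunctionFromIP((LPCBYTE)ctx.Rip, &funcId);  // Rip: x64
        if (funcId == 0)
        {
            // Top of stack is unmanaged: unwind natively (elided) until ctx
            // points at the top-most managed frame, then use it as the seed.
            pSeed = (BYTE*)&ctx;
            cbSeed = sizeof(ctx);
        }
        g_pProfilerInfo->DoStackSnapshot(threadId, &OnSnapshot,
            COR_PRF_SNAPSHOT_DEFAULT, nullptr, pSeed, cbSeed);
    }
    ::ResumeThread(hThread);
}
```

A real profiler would additionally coordinate with ThreadDestroyed (see below) so the target thread cannot be destroyed mid-walk.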
@@ -281,7 +281,7 @@ Lucky for you, the CLR notifies profilers when a thread is about to be destroyed Rule 2: Block in ThreadDestroyed callback until that thread’s stack walk is complete - + **_GC helps you make a cycle_** @@ -293,25 +293,25 @@ A while back I mentioned that it is clearly a bad idea for your profiler to hold Example #1: -- Thread A successfully grabs and now owns one of your profiler locks -- Thread B = thread doing the GC -- Thread B calls profiler’s GarbageCollectionStarted callback -- Thread B blocks on the same profiler lock -- Thread A executes GetClassFromTokenAndTypeArgs() -- GetClassFromTokenAndTypeArgs tries to trigger a GC, but notices a GC is already in progress. -- Thread A blocks, waiting for GC currently in progress (Thread B) to complete +- Thread A successfully grabs and now owns one of your profiler locks +- Thread B = thread doing the GC +- Thread B calls profiler’s GarbageCollectionStarted callback +- Thread B blocks on the same profiler lock +- Thread A executes GetClassFromTokenAndTypeArgs() +- GetClassFromTokenAndTypeArgs tries to trigger a GC, but notices a GC is already in progress. +- Thread A blocks, waiting for GC currently in progress (Thread B) to complete - But B is waiting for A, because of your profiler lock. ![](media/gccycle.jpg) Example #2: -- Thread A successfully grabs and now owns one of your profiler locks -- Thread B calls profiler’s ModuleLoadStarted callback -- Thread B blocks on the same profiler lock -- Thread A executes GetClassFromTokenAndTypeArgs() -- GetClassFromTokenAndTypeArgs triggers a GC -- Thread A (now doing the GC) waits for B to be ready to be collected +- Thread A successfully grabs and now owns one of your profiler locks +- Thread B calls profiler’s ModuleLoadStarted callback +- Thread B blocks on the same profiler lock +- Thread A executes GetClassFromTokenAndTypeArgs() +- GetClassFromTokenAndTypeArgs triggers a GC +- Thread A (now doing the GC) waits for B to be ready to be collected - But B is waiting for A, because of your profiler lock. ![](media/deadlock.jpg) @@ -332,10 +332,10 @@ Yeah, if you read carefully, you’ll see that this rule never even mentions DoS I’m just about tuckered out, so I’m gonna close this out with a quick summary of the highlights. Here's what's important to remember. -1. Synchronous stack walks involve walking the current thread in response to a profiler callback. These don’t require seeding, suspending, or any special rules. Enjoy! -2. Asynchronous walks require a seed if the top of the stack is unmanaged code not part of a PInvoke or COM call. You supply a seed by directly suspending the target thread and walking it yourself, until you find the top-most managed frame. If you don’t supply a seed in this case, DoStackSnapshot will just return a failure code to you. -3. If you directly suspend threads, remember that only a thread that has never run managed code can suspend another thread -4. When doing asynchronous walks, always block in your ThreadDestroyed callback until that thread’s stack walk is complete +1. Synchronous stack walks involve walking the current thread in response to a profiler callback. These don’t require seeding, suspending, or any special rules. Enjoy! +2. Asynchronous walks require a seed if the top of the stack is unmanaged code not part of a PInvoke or COM call. You supply a seed by directly suspending the target thread and walking it yourself, until you find the top-most managed frame. 
If you don’t supply a seed in this case, DoStackSnapshot will just return a failure code to you. +3. If you directly suspend threads, remember that only a thread that has never run managed code can suspend another thread +4. When doing asynchronous walks, always block in your ThreadDestroyed callback until that thread’s stack walk is complete 5. Do not hold a lock while your profiler calls into a CLR function that can trigger a GC Finally, a note of thanks to the rest of the CLR Profiling API team, as the writing of these rules is truly a team effort. And special thanks to Sean Selitrennikoff who provided an earlier incarnation of much of this content. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md b/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md index 746a45d38c0c8..440931e7dca13 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md @@ -2,13 +2,13 @@ This post is organized in chronological order, telling what your profiler should be doing at the following times in the process: -- Startup Time -- ModuleLoadFinished Time -- RequestReJIT Time -- Actual ReJIT Time -- RequestRevert Time +- Startup Time +- ModuleLoadFinished Time +- RequestReJIT Time +- Actual ReJIT Time +- RequestRevert Time + - ## Startup Time @@ -22,11 +22,11 @@ Typically, your profiler will also create a new thread at this point, call it yo ## ModuleLoadFinished Time -### +### -### +### -### +### ### Metadata Changes @@ -46,19 +46,19 @@ This won’t make much sense until you’ve read the next section, but I’m pla Now imagine your user has turned some dial on your out-of-process GUI, to request that some functions get instrumented (or re-instrumented (or re-re-instrumented (or …))). This results in a signal sent to your in-process profiler component. Your ReJIT Thread now knows it must call **RequestReJIT**. You can call this API once in bulk for a list of functions to ReJIT. Note that functions are expressed in terms of ModuleID + mdMethodDef metadata tokens. A few things to note about this: -- You request that all instantiations of a generic function (or function on a generic class) get ReJITted with a single ModuleID + mdMethodDef pair. You cannot request a specific instantiation be ReJITted, or provide instantiation-specific IL. This is nothing new, as classic first-JIT-instrumentation should never be customized per instantiation either. But the ReJIT API is designed with this restriction in mind, as you’ll see later on. -- ModuleID is specific to one AppDomain for unshared modules, or the SharedDomain for shared modules. Thus: - - If ModuleID is shared, then your request will simultaneously apply to all domains using the shared copy of this module (and thus function) - - If ModuleID is unshared, then your request will apply only to the single AppDomain using this module (and function) - - Therefore, if you want this ReJIT request to apply to _all unshared copies_ of this function: - - You’ll need to include all such ModuleIDs in this request. - - And… any _future_ unshared loads of this module will result in new ModuleIDs. So as those loads happen, you’ll need to make further calls to RequestReJIT with the new ModuleIDs to ensure those copies get ReJITted as well. - - This is optional, and only need be done if you truly want this ReJIT request to apply to all unshared copies of the function. 
You’re perfectly welcome to ReJIT only those unshared copies you want (and / or the shared copy). - - Now you can re-read the “Re-Request Prior ReJITs” section above. :-) +- You request that all instantiations of a generic function (or function on a generic class) get ReJITted with a single ModuleID + mdMethodDef pair. You cannot request a specific instantiation be ReJITted, or provide instantiation-specific IL. This is nothing new, as classic first-JIT-instrumentation should never be customized per instantiation either. But the ReJIT API is designed with this restriction in mind, as you’ll see later on. +- ModuleID is specific to one AppDomain for unshared modules, or the SharedDomain for shared modules. Thus: + - If ModuleID is shared, then your request will simultaneously apply to all domains using the shared copy of this module (and thus function) + - If ModuleID is unshared, then your request will apply only to the single AppDomain using this module (and function) + - Therefore, if you want this ReJIT request to apply to _all unshared copies_ of this function: + - You’ll need to include all such ModuleIDs in this request. + - And… any _future_ unshared loads of this module will result in new ModuleIDs. So as those loads happen, you’ll need to make further calls to RequestReJIT with the new ModuleIDs to ensure those copies get ReJITted as well. + - This is optional, and only need be done if you truly want this ReJIT request to apply to all unshared copies of the function. You’re perfectly welcome to ReJIT only those unshared copies you want (and / or the shared copy). + - Now you can re-read the “Re-Request Prior ReJITs” section above. :-) -## +## -### +### ### More on AppDomains @@ -81,18 +81,18 @@ You may have noticed that you have read a whole lot of words so far, but we have IF this is the first generic instantiation to ReJIT, for a given RequestReJIT call (or this is not a generic at all), THEN: - CLR calls **GetReJITParameters** - - This callback passes an ICorProfilerFunctionControl to your profiler. Inside your implementation of GetReJITParameters (and no later!) you may call into ICorProfilerFunctionControl to provide the instrumented IL and codegen flags that the CLR should use during the ReJIT - - Therefore it is here where you may: - - Call GetILFunctionBody - - Add any new LocalVarSigTokens to the function’s module’s metadata. (You may not do any other metadata modifications here, though!) - - Rewrite the IL to your specifications, passing it to ICorProfilerFunctionControl::SetILFunctionBody. - - You may NOT call ICorProfilerInfo::SetILFunctionBody for a ReJIT! This API still exists if you want to do classic first-JIT IL rewriting only. - - Note that GetReJITParameters expresses the function getting compiled in terms of the ModuleID + mdMethodDef pair you previously specified to RequestReJIT, and _not_ in terms of a FunctionID. As mentioned before, you may not provide instantiation-specific IL! + - This callback passes an ICorProfilerFunctionControl to your profiler. Inside your implementation of GetReJITParameters (and no later!) you may call into ICorProfilerFunctionControl to provide the instrumented IL and codegen flags that the CLR should use during the ReJIT + - Therefore it is here where you may: + - Call GetILFunctionBody + - Add any new LocalVarSigTokens to the function’s module’s metadata. (You may not do any other metadata modifications here, though!) + - Rewrite the IL to your specifications, passing it to ICorProfilerFunctionControl::SetILFunctionBody. 
+ - You may NOT call ICorProfilerInfo::SetILFunctionBody for a ReJIT! This API still exists if you want to do classic first-JIT IL rewriting only. + - Note that GetReJITParameters expresses the function getting compiled in terms of the ModuleID + mdMethodDef pair you previously specified to RequestReJIT, and _not_ in terms of a FunctionID. As mentioned before, you may not provide instantiation-specific IL! And then, for all ReJITs (regardless of whether they are for the first generic instantiation or not): -- CLR calls **ReJITCompilationStarted** -- CLR calls **ReJITCompilationFinished** +- CLR calls **ReJITCompilationStarted** +- CLR calls **ReJITCompilationFinished** These callbacks express the function getting compiled in terms of FunctionID + ReJITID. (ReJITID is simply a disambiguating value so that each ReJITted version of a function instantiation can be uniquely identified via FunctionID + ReJITID.) Your profiler doesn’t need to do anything in the above callbacks if it doesn’t want to. They just notify you that the ReJIT is occurring, and get called for each generic instantiation (or non-generic) that gets ReJITted. @@ -114,12 +114,12 @@ Note that RequestRevert allows you to revert back to the original JITted IL, and If there are any errors with performing the ReJIT, you will be notified by the dedicated callback ICorProfilerCallback4::ReJITError(). Errors can happen at a couple times: -- RequestReJIT Time: These are fundamental errors with the request itself. This can include bad parameter values, requesting to ReJIT dynamic (Ref.Emit) code, out of memory, etc. If errors occur here, you’ll get a callback to your implementation of ReJITError(), sandwiched inside your call to RequestReJIT on your ReJIT Thread. -- Actual ReJIT Time: These are errors we don’t encounter until actually trying to ReJIT the function itself. When these later errors occur, your implementation of ReJITError() is called on whatever CLR thread encountered the error. +- RequestReJIT Time: These are fundamental errors with the request itself. This can include bad parameter values, requesting to ReJIT dynamic (Ref.Emit) code, out of memory, etc. If errors occur here, you’ll get a callback to your implementation of ReJITError(), sandwiched inside your call to RequestReJIT on your ReJIT Thread. +- Actual ReJIT Time: These are errors we don’t encounter until actually trying to ReJIT the function itself. When these later errors occur, your implementation of ReJITError() is called on whatever CLR thread encountered the error. You’ll note that ReJITError can provide you not only the ModuleID + mdMethodDef pair that caused the error, but optionally a FunctionID as well. Depending on the nature of the error occurred, the FunctionID may be available, so that your profiler may know the exact generic instantiation involved with the error. If FunctionID is null, then the error was fundamental to the generic function itself (and thus occurred for all instantiations). - + Ok, that about covers it on how your profiler is expected to use ReJIT. As you can see, there are several different tasks your profiler needs to do at different times to get everything right. But I trust you, you’re smart. 
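
As a recap of the mechanics above, here is a minimal sketch (an editor's illustration, not code from the original post) of the two halves of a ReJIT. It assumes `g_pProfilerInfo` is an `ICorProfilerInfo4*` saved at startup, that `MyCallback` stands in for the profiler's `ICorProfilerCallback4` implementation (all other methods elided), and that `BuildInstrumentedIL()` is a hypothetical helper producing the new method body:

```
// Sketch only.
#include <windows.h>
#include <cor.h>
#include <corprof.h>

extern ICorProfilerInfo4* g_pProfilerInfo;  // saved during Initialize()

// Hypothetical: produces the instrumented IL for a ModuleID + mdMethodDef.
HRESULT BuildInstrumentedIL(ModuleID moduleId, mdMethodDef methodId,
                            LPCBYTE* ppbNewIL, ULONG* pcbNewIL);

// Stand-in for the profiler's callback object; everything else is elided.
struct MyCallback : public ICorProfilerCallback4
{
    HRESULT STDMETHODCALLTYPE GetReJITParameters(ModuleID, mdMethodDef,
        ICorProfilerFunctionControl*) override;
    // ...remaining ICorProfilerCallback4 / IUnknown methods omitted
};

// RequestReJIT Time: called on the profiler's own ReJIT Thread, in bulk.
HRESULT RequestReJITBatch(ULONG count, ModuleID moduleIds[],
                          mdMethodDef methodIds[])
{
    // Per-function failures are reported later through ReJITError().
    return g_pProfilerInfo->RequestReJIT(count, moduleIds, methodIds);
}

// Actual ReJIT Time: the only place the replacement IL may be supplied.
HRESULT STDMETHODCALLTYPE MyCallback::GetReJITParameters(
    ModuleID moduleId, mdMethodDef methodId,
    ICorProfilerFunctionControl* pFunctionControl)
{
    LPCBYTE pbNewIL = nullptr;
    ULONG cbNewIL = 0;
    HRESULT hr = BuildInstrumentedIL(moduleId, methodId, &pbNewIL, &cbNewIL);
    if (FAILED(hr))
        return hr;

    // Note: this is ICorProfilerFunctionControl::SetILFunctionBody;
    // ICorProfilerInfo::SetILFunctionBody is not allowed for a ReJIT.
    return pFunctionControl->SetILFunctionBody(cbNewIL, pbNewIL);
}
```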
diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md index 34be8594412d4..52f5fd1b95593 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md @@ -3,42 +3,42 @@ If your profiler plays with metadata, you've undoubtedly come across signature blobs. They’re used to encode type information for method definitions & references, local variables, and a whole lot more. They’re wonderfully compact, recursively versatile, and sometimes, well, challenging to parse. Fortunately, [Rico Mariani](https://docs.microsoft.com/en-us/archive/blogs/ricom/) was feeling generous one day, and churned out a simple parser that can read these types of signatures: -MethodDefSig -MethodRefSig -StandAloneMethodSig -FieldSig -PropertySig +MethodDefSig +MethodRefSig +StandAloneMethodSig +FieldSig +PropertySig LocalVarSig -Here are the files: -[sigparse.cpp](samples/sigparse.cpp) (Rico's signature parser) -[sigformat.cpp](samples/sigformat.cpp) (An example extension to the parser) +Here are the files: +[sigparse.cpp](samples/sigparse.cpp) (Rico's signature parser) +[sigformat.cpp](samples/sigformat.cpp) (An example extension to the parser) [PlugInToYourProfiler.cpp](samples/PlugInToYourProfiler.cpp) (Example code to plug the extension into your profiler) Open up **sigparse.cpp** in your favorite editor and take a look at the grammar at the top. The grammar comes from the ECMA CLI spec. Jonathan Keljo has a [link](http://blogs.msdn.com/jkeljo/archive/2005/08/04/447726.aspx) to it from his blog. This tells you the types of signature blobs the parser can handle. Sigparse.cpp is structured without any dependencies on any headers, so you can easily absorb it into your profiler project. There are two things you will need to do to make use of the code. I provided examples of each of these in the download above to help you out: -1. You will **extend the code** to make use of the parsed components of the signature however you like. Perhaps you’ll build up your own internal structures based on what you find. Or maybe you’ll build a pretty-printer that displays method prototypes in the managed language of your choice. -2. You will then **call the code** to perform the parse on signature blobs you encounter while profiling. +1. You will **extend the code** to make use of the parsed components of the signature however you like. Perhaps you’ll build up your own internal structures based on what you find. Or maybe you’ll build a pretty-printer that displays method prototypes in the managed language of your choice. +2. You will then **call the code** to perform the parse on signature blobs you encounter while profiling. ## Extending the code Simply derive a new class from SigParser, and override the virtual functions. The functions you override are events to be handled as the parser traverses the signature in top-down fashion. 
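
As a hedged illustration of such a subclass (the authoritative virtual-method names and signatures are in sigparse.cpp itself; the `sig_*` parameter types below are assumptions based on the sample's conventions), a tracer that prints the parse as an indented tree might start out like this:

```
// Sketch only: SigParser and the sig_* typedefs come from sigparse.cpp,
// which is designed to be absorbed directly into your project.
#include <stdio.h>
#include "sigparse.cpp"

class SigTracer : public SigParser
{
    int m_depth = 0;
    void Line(const char* what) { printf("%*s%s\n", m_depth * 2, "", what); }

protected:
    virtual void NotifyBeginMethod(sig_elem_type elemType) { Line("method"); ++m_depth; }
    virtual void NotifyEndMethod()                         { --m_depth; }
    virtual void NotifyParamCount(sig_count count)         { Line("param count"); }
    virtual void NotifyBeginParam()                        { Line("param"); ++m_depth; }
    virtual void NotifyEndParam()                          { --m_depth; }
    virtual void NotifyTypeSimple(sig_elem_type elemType)  { Line("simple type"); }
    // ...and so on for the remaining Notify* events you care about
};

// Usage (e.g., on a signature blob fetched through the metadata API):
//     SigTracer tracer;
//     bool ok = tracer.Parse(pSigBlob, cbSigBlob);
```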
For example, when the parser encounters a MethodDef, you might see calls to your overrides of: -NotifyBeginMethod() - NotifyParamCount() - NotifyBeginRetType() - NotifyBeginType() - NotifyTypeSimple() - NotifyEndType() - NotifyEndRetType() - NotifyBeginParam() - NotifyBeginType() - NotifyTypeSimple() - NotifyEndType() - NotifyEndParam() - _… (more parameter notifications occur here if more parameters exist)_ +NotifyBeginMethod() + NotifyParamCount() + NotifyBeginRetType() + NotifyBeginType() + NotifyTypeSimple() + NotifyEndType() + NotifyEndRetType() + NotifyBeginParam() + NotifyBeginType() + NotifyTypeSimple() + NotifyEndType() + NotifyEndParam() + _… (more parameter notifications occur here if more parameters exist)_ NotifyEndMethod() And yes, generics are handled as well. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Tail call JIT conditions.md b/docs/design/coreclr/profiling/davbr-blog-archive/Tail call JIT conditions.md index 194c0ba517aaf..ad3e937e8e3a0 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Tail call JIT conditions.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Tail call JIT conditions.md @@ -8,28 +8,28 @@ _First, Grant talked about the 64-bit JITs (one for x64, one for ia64):_ For the 64-bit JIT, we tail call whenever we’re allowed to. Here’s what prevents us from tail calling (in no particular order): - We inline the call instead (we never inline recursive calls to the same method, but we will tail call them) -- The call/callvirt/calli is followed by something other than nop or ret IL instructions. -- The caller or callee return a value type. -- The caller and callee return different types. -- The caller is synchronized (MethodImplOptions.Synchronized). -- The caller is a shared generic method. -- The caller has imperative security (a call to Assert, Demand, Deny, etc.). -- The caller has declarative security (custom attributes). +- The call/callvirt/calli is followed by something other than nop or ret IL instructions. +- The caller or callee return a value type. +- The caller and callee return different types. +- The caller is synchronized (MethodImplOptions.Synchronized). +- The caller is a shared generic method. +- The caller has imperative security (a call to Assert, Demand, Deny, etc.). +- The caller has declarative security (custom attributes). - The caller is varargs -- The callee is varargs. +- The callee is varargs. - The runtime forbids the JIT to tail call. (_There are various reasons the runtime may disallow tail calling, such as caller / callee being in different assemblies, the call going to the application's entrypoint, any conflicts with usage of security features, and other esoteric cases._) -- The il did not have the tail. prefix and we are not optimizing (the profiler and debugger control this) -- The il did not have the tail. prefix and the caller had a localloc instruction (think alloca or dynamic stack allocation) -- The caller is getting some GS security cookie checks -- The il did not have the tail. prefix and a local or parameter has had its address taken (ldarga, or ldloca) +- The il did not have the tail. prefix and we are not optimizing (the profiler and debugger control this) +- The il did not have the tail. prefix and the caller had a localloc instruction (think alloca or dynamic stack allocation) +- The caller is getting some GS security cookie checks +- The il did not have the tail. 
prefix and a local or parameter has had its address taken (ldarga, or ldloca) - The caller is the same as the callee and the runtime disallows inlining - The callee is invoked via stub dispatch (_i.e., via intermediate code that's generated at runtime to optimize certain types of calls_). -- For x64 we have these additional restrictions: +- For x64 we have these additional restrictions: - - The callee has one or more parameters that are valuetypes of size 3,5,6,7 or \>8 bytes - - The callee has more than 4 arguments (don’t forget to count the this pointer, generics, etc.) and more than the caller + - The callee has one or more parameters that are valuetypes of size 3,5,6,7 or \>8 bytes + - The callee has more than 4 arguments (don’t forget to count the this pointer, generics, etc.) and more than the caller - For all of the parameters passed on the stack the GC-ness must match between the caller and callee. (_"GC-ness" means the state of being a pointer to the beginning of an object managed by the GC, or a pointer to the interior of an object managed by the GC (e.g., a byref field), or neither (e.g., an integer or struct)._) -- For ia64 we have this additional restriction: +- For ia64 we have this additional restriction: - Any of the callee arguments do not get passed in a register. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md b/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md index f503b612cda31..0be1cb40932e2 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md @@ -9,11 +9,11 @@ Type forwarding is nothing new. However, in CLR V4, we are enabling type forwar The example I’ll use where the .NET Framework uses type forwarding is the TimeZoneInfo class. In CLR V4, TimeZoneInfo is now forwarded from System.Core.dll to mscorlib.dll. If you open the CLR V4 copy of System.Core.dll in ildasm and choose Dump, you'll see the following: -| +| ``` .class extern /*27000004*/ forwarder System.TimeZoneInfo { - .assembly extern mscorlib /*23000001*/ + .assembly extern mscorlib /*23000001*/ } ``` | @@ -28,15 +28,15 @@ This walkthrough assumes you have .NET 4.0 or later installed **and** an older r Code up a simple C# app that uses System.TimeZoneInfo: ``` -namespace test -{ - class Class1 - { - static void Main(string[] args) - { - System.TimeZoneInfo ti = null; - } - } +namespace test +{ + class Class1 + { + static void Main(string[] args) + { + System.TimeZoneInfo ti = null; + } + } } ``` @@ -49,7 +49,7 @@ csc /debug+ /o- /r:"C:\Program Files (x86)\Reference Assemblies\Microsoft\Framew Again, be sure you’re using an old csc.exe from, say, a NET 3.5 installation. To verify, open up Class1.exe in ildasm, and take a look at Main(). It should look something like this: ``` -.method /*06000001*/ private hidebysig static +.method /*06000001*/ private hidebysig static void Main(string[] args) cil managed { .entrypoint @@ -83,14 +83,14 @@ The above will force Class1.exe to bind against .NET 4.0 Beta 1. And when it co To experiment with forwarding your own types, the process is: -- Create Version 1 of your library - - - Create version 1 of your library assembly that defines your type (MyLibAssemblyA.dll) - - Create an app that references your type in MyLibAssemblyA.dll (MyClient.exe) -- Create version 2 of your library - - - Recompile MyLibAssemblyA.dll to forward your type elsewhere (MyLibAssemblyB.dll) - - Don’t recompile MyClient.exe. 
Let it still think the type is defined in MyLibAssemblyA.dll. +- Create Version 1 of your library + + - Create version 1 of your library assembly that defines your type (MyLibAssemblyA.dll) + - Create an app that references your type in MyLibAssemblyA.dll (MyClient.exe) +- Create version 2 of your library + + - Recompile MyLibAssemblyA.dll to forward your type elsewhere (MyLibAssemblyB.dll) + - Don’t recompile MyClient.exe. Let it still think the type is defined in MyLibAssemblyA.dll. ### Version 1 @@ -140,9 +140,9 @@ Ok, time to upgrade! ### Version 2 Time goes by, your library is growing, and its time to split it into two DLLs. Gotta move Foo into the new DLL. Save this into MyLibAssemblyB.cs ``` -using System; -public class Foo -{ +using System; +public class Foo +{ } ``` @@ -170,7 +170,7 @@ Foo, MyLibAssemblyB, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null And this all despite the fact that MyClient.exe still believes that Foo lives in MyLibAssemblyA: ``` -.method /*06000001*/ public hidebysig static +.method /*06000001*/ public hidebysig static void Main() cil managed { .entrypoint @@ -200,4 +200,4 @@ However, type forwarding is important to understand if your profiler needs to fo In any case, whether you think your profiler will be affected by type forwarding, be sure to test, test, test! - \ No newline at end of file + diff --git a/docs/design/features/DotNetCore-SharedPackageStore.md b/docs/design/features/DotNetCore-SharedPackageStore.md index 45b7aaa01828d..ad60f8e8a5dd6 100644 --- a/docs/design/features/DotNetCore-SharedPackageStore.md +++ b/docs/design/features/DotNetCore-SharedPackageStore.md @@ -24,7 +24,7 @@ The package store can be either a global system-wide folder or a dotnet.exe rela + netcoreapp2.1 + refs + netcoreapp2.0 - + netcoreapp2.1 + + netcoreapp2.1 ``` The layout within `netcoreapp*` folders is a NuGet cache layout. @@ -34,7 +34,7 @@ The layout within `netcoreapp*` folders is a NuGet cache layout. To compose the layout of the shared package store, we will use a dotnet command called `dotnet store`. We expect the *hosting providers* (ex: Antares) to use the command to prime their machines and framework authors who want to provide *pre-optimized package archives* create the compressed archive layouts. -The layout is composed from a list of package names and versions specified as xml: +The layout is composed from a list of package names and versions specified as xml: **Roslyn Example** ```xml @@ -72,7 +72,7 @@ The output folder will be consumed by the runtime by adding to the `DOTNET_SHARE # Building apps with shared packages -The current mechanism to build applications that share assemblies is by not specifying a RID in the project file. Then, a portable app model is assumed and assemblies that are part of Microsoft.NETCore.App are found under the `dotnet` install root. With shared package store, applications have the ability to filter any set of packages from their publish output. Thus the decision of a portable or a standalone application is not made at the time of project authoring but is instead done at publish time. +The current mechanism to build applications that share assemblies is by not specifying a RID in the project file. Then, a portable app model is assumed and assemblies that are part of Microsoft.NETCore.App are found under the `dotnet` install root. With shared package store, applications have the ability to filter any set of packages from their publish output. 
Thus the decision of a portable or a standalone application is not made at the time of project authoring but is instead done at publish time. ## Project Authoring We will by default treat `Microsoft.NETCore.App` as though `type: platform` is always specified, thus requiring no explicit RID specification by the user. It will be an `ERROR` to specify a RID in the csproj file using the `` tag. diff --git a/docs/design/features/IJW-activation.md b/docs/design/features/IJW-activation.md index 4f63f13274188..4d0fe9c173750 100644 --- a/docs/design/features/IJW-activation.md +++ b/docs/design/features/IJW-activation.md @@ -8,7 +8,7 @@ To support any C++/CLI users that wish to use .NET Core, the runtime and hosting * Load the appropriate version of .NET Core for the assembly if a .NET Core instance is not running, or validate that the currently running .NET Core instance can satisfy the assemblies requirements. * Load the (already-in-memory) assembly into the runtime. * Patch the vtfixup table tokens to point to JIT stubs. - + ## Design IJW activation has a variety of hard problems associated with it, mainly with loading in mixed mode assemblies that are not the application. diff --git a/docs/design/features/Linux-Hugepage-Crossgen2.md b/docs/design/features/Linux-Hugepage-Crossgen2.md index dcc37774b3987..32083cc39eba8 100644 --- a/docs/design/features/Linux-Hugepage-Crossgen2.md +++ b/docs/design/features/Linux-Hugepage-Crossgen2.md @@ -1,7 +1,7 @@ Configuring Huge Pages for loading composite binaries using CoreCLR on Linux ---- -Huge pages can provide performance benefits to reduce the cost of TLB cache misses when +Huge pages can provide performance benefits to reduce the cost of TLB cache misses when executing code. In general, the largest available wins may be achieved by enabling huge pages for use by the GC, which will dominate the memory use in the process, but in some circumstances, if the application is sufficiently large, there may be a benefit to using @@ -16,7 +16,7 @@ images using the hugetlbfs. Doing some requires several steps. 2. The composite image must be copied into a hugetlbfs filesystem which is visible to the .NET process instead of the composite image being loaded from the normal path. - IMPORTANT: The composite image must NOT be located in the normal path next to the application binary, or that file will be used instead of the huge page version. - The environment variable `COMPlus_NativeImageSearchPaths` must be set to point at the location of the hugetlbfs in use. For instance, `COMPlus_NativeImageSearchPaths` might be set to `/var/lib/hugetlbfs/user/USER/pagesize-2MB` - - As the cp command does not support copying into a hugetlbfs due to lack of support for the write syscall in that file system, a custom copy application must be used. A sample application that may be used to perform this task has a source listing in Appendix A. + - As the cp command does not support copying into a hugetlbfs due to lack of support for the write syscall in that file system, a custom copy application must be used. A sample application that may be used to perform this task has a source listing in Appendix A. 3. The machine must be configured to have sufficient huge pages available in the appropriate huge page pool. The memory requirements of huge page PE loading are as follows. - Sufficient pages to hold the unmodified copy of the composite image in the hugetlbfs. These pages will be used by the initial copy which emplaces the composite image into huge pages. 
- By default the runtime will map each page of the composite image using a MAP_PRIVATE mapping. This will require that the maximum number of huge pages is large enough to hold a completely separate copy of the image as loaded. @@ -62,7 +62,7 @@ int main(int argc, char** argv) printf("fdSrc fstat failed\n"); return 1; } - + addrSrc = mmap(0, st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fdSrc, 0); if (addrSrc == MAP_FAILED) { diff --git a/docs/design/features/OnStackReplacement.md b/docs/design/features/OnStackReplacement.md index 905696875046e..757eb05dcba08 100644 --- a/docs/design/features/OnStackReplacement.md +++ b/docs/design/features/OnStackReplacement.md @@ -120,7 +120,7 @@ while the old code is active in some stack frames. An implementation must come up with solutions to several related sub problems, which we describe briefly here, and in more detail below. -* **Patchpoints** : Identify where in the original method OSR is possible. +* **Patchpoints** : Identify where in the original method OSR is possible. We will use the term _patchpoint_ to describe a particular location in a method's code that supports OSR transitions. * **Triggers** : Determine what will trigger an OSR transition @@ -258,13 +258,13 @@ PatchpointHelper(int ppID, int* counter) switch (s) { - case Unknown: - *counter = initialThreshold; + case Unknown: + *counter = initialThreshold; SetState(s, Active); return; - case Active: - *counter = checkThreshold; + case Active: + *counter = checkThreshold; SetState(s, Pending); RequestAlternative(ppID); return; @@ -273,7 +273,7 @@ PatchpointHelper(int ppID, int* counter) *counter = checkThreshold; return; - case Ready: + case Ready: Transition(...); // does not return } } @@ -483,7 +483,7 @@ this is to just leave the original frame in place, and have the OSR frame #### 3.4.1 The Prototype The original method conditionally calls to the patchpoint helper at -patchpoints. The helper will return if there is no transition. +patchpoints. The helper will return if there is no transition. For a transition, the helper will capture context and virtually unwind itself and the original method from the stack to recover callee-save register values @@ -554,7 +554,7 @@ frame pointers. When control is executing in a funclet there are effectively two activation records on the stack that share a single frame: the parent frame and the funclet frame. The funclet frame is largely a stub frame and most of the frame -state is kept in the parent frame. +state is kept in the parent frame. These two frames are not adjacent; they are separated by some number of runtime frames. This means it is going to be difficult for our system to handle @@ -799,7 +799,7 @@ G_M6138_IG03: FFC9 dec ecx 894DF0 mov dword ptr [rbp-10H], ecx 837DF000 cmp dword ptr [rbp-10H], 0 // ... > 0 ? - 7F0E jg SHORT G_M6138_IG05 + 7F0E jg SHORT G_M6138_IG05 G_M6138_IG04: ;; bbWeight=0.01 488D4DF0 lea rcx, bword ptr [rbp-10H] // &patchpointCounter @@ -910,7 +910,7 @@ For example: 5F pop rdi 4883C448 add rsp, 72 5D pop rbp - C3 ret + C3 ret ``` with unwind info: ``` diff --git a/docs/design/features/StringDeduplication.md b/docs/design/features/StringDeduplication.md index edd3d0abd9a70..fa69d2c21b889 100644 --- a/docs/design/features/StringDeduplication.md +++ b/docs/design/features/StringDeduplication.md @@ -14,18 +14,18 @@ Dedup - string deduplication is often shortened to dedup in this document. This is an opt-in feature and should have no performance penalty when it’s off. And by default it’s off. 
-When it’s on, we aim to – +When it’s on, we aim to – - Only deduplicate strings in old generations of the GC heap. - Not increase the STW pauses for ephemeral GCs. - Not regress string allocation speed. -- Provide static analysis and runtime checks to detect patterns incompatible with string deduping. This is required to enable customers to opt-in into this feature with confidence. +- Provide static analysis and runtime checks to detect patterns incompatible with string deduping. This is required to enable customers to opt-in into this feature with confidence. ## Details -#### **History** +#### **History** -The string deduplication feature has been brought up before. See [runtime issue #9022](https://github.com/dotnet/runtime/issues/9022) for discussion. +The string deduplication feature has been brought up before. See [runtime issue #9022](https://github.com/dotnet/runtime/issues/9022) for discussion. And a proof of concept was gracefully [attempted](https://github.com/dotnet/coreclr/pull/15135) by [@Rattenkrieg](https://github.com/Rattenkrieg) before. But it was incomplete and the design didn’t have the kind of perf characteristics desired – it had most of the logic in GC vs outside GC. @@ -33,9 +33,9 @@ An example of a user implemented string deduplication is Roslyn’s [StringTable #### **Customer impact estimation and validation** -As a general rule we want to have this for all features we add to the runtime. +As a general rule we want to have this for all features we add to the runtime. -Issue #[9022](https://github.com/dotnet/runtime/issues/9022) It mentioned some general data: +Issue #[9022](https://github.com/dotnet/runtime/issues/9022) It mentioned some general data: “The expectation is that typical apps have 20% of their GC heap be strings. Some measurements we have seen is that for at least some applications, 10-30% of strings all may be duplicated, so this might save 2-3% of the GC heap. Not huge, but the feature is not that difficult either.” @@ -48,9 +48,9 @@ There are 2 sources of data we could get – #### **Design outline** -This is an opt in feature. When the runtime detects it’s turned on, it creates a dedup thread to do the work. +This is an opt in feature. When the runtime detects it’s turned on, it creates a dedup thread to do the work. -Detection of duplicated strings is done by looking into a hash table. The key into this hash table is the hash code of the content of a string. Detailed description of this detection is later in this doc. +Detection of duplicated strings is done by looking into a hash table. The key into this hash table is the hash code of the content of a string. Detailed description of this detection is later in this doc. As the dedup thread goes through the old generations linearly, it looks for references to a string object (denoted by the method table) and either calculates or looks up the hash code of that string to see if it already exists in the hash table. If so it will attempt to change the reference to point to that string with a CAS operation. If this fails, which means some other thread changed the reference at the mean time, we simply ignore this and move on. We expect the CAS failure rate to be very low. @@ -58,17 +58,17 @@ Since the new string reference we will write to the heap has the exact same type The dedup hash table acts as weak references to the strings. Depending on the scenario we might choose to null out these weak references or not (if it’s more performant to rebuild the hash table). 
If we do the former these weak references would be treated as short weak handles so the following will happen before we scan for finalization -

-- During BGC final mark phase we will need to null out the strings that are not marked in the hash table. This can be made concurrent.
+- During BGC final mark phase we will need to null out the strings that are not marked in the hash table. This can be made concurrent.
- During a full blocking GC we will need to null out the strings that are not marked in the hash table, and relocate the ones that got promoted if we are doing a compacting GC.

**Alternate design points**

-- Should we create multiple threads to do the work?
+- Should we create multiple threads to do the work?

-Deduping can be done leisurely, so it doesn’t merit having multiple threads.
+Deduping can be done leisurely, so it doesn’t merit having multiple threads.

-- Can we use an existing thread to do the work on?
+- Can we use an existing thread to do the work on?

The finalizer thread is something that’s idling most of the time. However there are already plenty of types of work scheduled to potentially run on the finalizer thread, so adding yet another thing, especially an opt-in feature, can get messy.

@@ -80,9 +80,9 @@ Only strings allocated on the managed heap will be considered for deduplication.

Currently calling GetHashCode of a string calculates a 32-bit hash code. This is not stored anywhere, unlike the default hash code that’s stored either in the syncblk or a syncblk entry, depending on whether the syncblk is also used by something else like locking. As the deduping thread goes through the heap it will calculate the 32-bit hash code and actually install it.

-However, a 32-bit hash code means we always need to check for collision by actually comparing the string content if the hash code is the same. And for large strings having to compare the string content could be very costly. For LOH compaction we already allocate a padding object for each large object (which currently takes up at most 0.4% of LOH space on 64-bit). We could make this padding object 1-ptr size larger and store the address of the string it’s deduped to. Likewise we can also use this to store the fact “this is the copy the hash table keeps track of so no need to dedup”. This way we can avoid having to do the detection multiple times for the same string. Below illustrates a scenario where large strings are deduplicated.
+However, a 32-bit hash code means we always need to check for collision by actually comparing the string content if the hash code is the same. And for large strings having to compare the string content could be very costly. For LOH compaction we already allocate a padding object for each large object (which currently takes up at most 0.4% of LOH space on 64-bit). We could make this padding object 1-ptr size larger and store the address of the string it’s deduped to. Likewise we can also use this to store the fact “this is the copy the hash table keeps track of so no need to dedup”. This way we can avoid having to do the detection multiple times for the same string. Below illustrates a scenario where large strings are deduplicated.

-`pad | s0 | pad | s1 | pad | s0_1`
+`pad | s0 | pad | s1 | pad | s0_1`

`obj0 (-> s0) | obj1 (-> s0_1) | obj2 (->s1) | obj3 (->s0_1) | obj4 (->s1) `

@@ -90,12 +90,12 @@ Each string obj, ie, s*, is a string on LOH and has a padding object in front of

s0_1 has the same content as s0. s1 has the same hash code but not the same content.
-"obj->s" means obj points to a string object s, or has a ref to s. So obj0 has a ref to s0, obj1 has a ref to s0_1, obj2 has a ref to s1 and so on. +"obj->s" means obj points to a string object s, or has a ref to s. So obj0 has a ref to s0, obj1 has a ref to s0_1, obj2 has a ref to s1 and so on. 1. As we go through the heap, we see obj0 which points to s0. 2. s0’s hash is calculated which we use to look into the hash table. 3. We see that no entries exist for that hash so we create an entry for it and in s0’s padding indicates that it’s stored in the hash table, ie, it’s the copy we keep. -4. Then we see obj1 which points to s0_1 whose hash doesn’t exist yet. We calculate the hash for s0_1, and see that there’s an entry for this hash already in the hash table, now we compare the content and see that it’s the same, now we store s0 in the padding object before s0_1 and change obj1’s ref to point to s0. +4. Then we see obj1 which points to s0_1 whose hash doesn’t exist yet. We calculate the hash for s0_1, and see that there’s an entry for this hash already in the hash table, now we compare the content and see that it’s the same, now we store s0 in the padding object before s0_1 and change obj1’s ref to point to s0. 5. Then we see obj2 and calculate s1’s hash. We notice an entry already exists for that hash so we compare the content and the content is not the same as s0’s. So we enter s1 into the hash table and indicate that it’s stored in the hash table. 6. Then we see obj3, and s0_1 indicates that it should be deduped to s0 so we change obj3’s ref to point to s0_1 right away. 7. Then we see obj4 which points to s1 and s1 indicates it’s stored in the hash table so we don’t need to dedup. @@ -106,11 +106,11 @@ Since we know the size of a string object trivially, we know which strings are o - If `InterlockCompareExchangePointer` fails because the ref was modified while we were finding a copy to dedup to (or insert into the hash table), we skip this ref. - If too many collisions exist for a hash code, we skip deduping for strings with that hash code. This avoids the DoS attack by creating too many strings for the same hash. -- If the string is too large. At some point going through a very large string to calculate its hash code will become simply not worth the effort. We'll need to do some perf investigation to figure out a good limit. +- If the string is too large. At some point going through a very large string to calculate its hash code will become simply not worth the effort. We'll need to do some perf investigation to figure out a good limit. **Alternate design points** -- Should we calculate the hash codes for SOH strings as gen1 GCs promote them into gen2? +- Should we calculate the hash codes for SOH strings as gen1 GCs promote them into gen2? This would increase gen1 pause. @@ -132,7 +132,7 @@ The following scenarios become problematic or more problematic when deduping is - Mutating the string content -Strings are supposed to be immutable. However in unsafe code you can change the string content after it’s created. Changing string content already asking for trouble without deduping – you could be changing the interned copy which means you are modifying someone else’s string which could cause completely unpredictable results for them. +Strings are supposed to be immutable. However in unsafe code you can change the string content after it’s created. 
Changing string content is already asking for trouble without deduping – you could be changing the interned copy which means you are modifying someone else’s string which could cause completely unpredictable results for them.

The most common way is to use the fixed keyword:

```
fixed (char* p = str)
{
}
```

-There are other ways such as
+There are other ways such as

`*((char*)(gcHandlePointingToString.AddrOfPinnedObject())) = 'c';`

Or

@@ -153,7 +153,7 @@

- Locking on a string

-Locking on a string object is already discouraged because a string can be interned. Having string dedup on can make this problematic more often if the string you called lock on is now deduped to a different string object.
+Locking on a string object is already discouraged because a string can be interned. Having string dedup on can make this problematic more often if the string you called lock on is now deduped to a different string object.

- Reference equality

@@ -187,7 +187,7 @@ To start with we will provide analysis for the following –

I’m seeing that there are almost 600 places in libraries that do `fixed (char*` but hopefully most of them do not actually modify the string content. We should definitely be encouraging folks to switch to using `string.Create` like what [PR#31700](https://github.com/dotnet/runtime/pull/31700) did (see the sketch at the end of this document).

-2. Using lock on a string object.
+2. Using lock on a string object.

- Reference equality checks on strings

Since `ReferenceEquals` is a performance critical API, we cannot do checks in its implementation.

We do have some libraries that rely on `ReferenceEquals`. We need to figure out what to do about them. See discussion [here](https://github.com/dotnet/runtime/pull/31971#pullrequestreview-355531406).

-- Additional checks in heap verification
+- Additional checks in heap verification

Heap verification will now include checks to verify that no one changes the string content after its hash is computed. This can be turned on when a certain level of COMPlus_HeapVerify is specified.

- Stress mode

-Instead of waiting till the productive moment to start the next deduping cycle, we can have a stress mode where we dedup randomly to catch problems sooner, same idea as GC stress to detect GC holes sooner.
+Instead of waiting till the productive moment to start the next deduping cycle, we can have a stress mode where we dedup randomly to catch problems sooner, same idea as GC stress to detect GC holes sooner.

We could even artificially create duplicates in this stress mode to find places that depend on object identity.

@@ -219,8 +219,8 @@ We might see some performance gains using RTM (Restricted Transactional Memory)

**Deduping other types of objects**

-We might consider not limiting deduping to just strings. There was a discussion in [runtime issue #12628](https://github.com/dotnet/runtime/issues/12628).
+We might consider not limiting deduping to just strings. There was a discussion in [runtime issue #12628](https://github.com/dotnet/runtime/issues/12628).

-**Deduping long lived references on stack**
+**Deduping long lived references on stack**

-There might be merit to look into deduping long lived refs on the stack. The amount of work it requires and the return makes it low priority but it may help with some corner cases.
\ No newline at end of file
+There might be merit to look into deduping long lived refs on the stack. The amount of work it requires and the return makes it low priority but it may help with some corner cases.
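To make the `string.Create` recommendation above concrete, here is a minimal sketch; `BuildUpper` is a hypothetical helper, not code from the runtime or libraries. The key property is that the new string's buffer is written exactly once, before the string is published, so its content can never change after the dedup thread installs a hash for it:

```csharp
using System;

static class StringCreateSketch
{
    // Hypothetical helper: produces an upper-cased copy without ever
    // mutating an existing string instance.
    static string BuildUpper(string source) =>
        string.Create(source.Length, source, (span, state) =>
        {
            // 'span' is the new string's own storage; it is filled in here,
            // before any other code (including the dedup thread) can see it.
            for (int i = 0; i < state.Length; i++)
                span[i] = char.ToUpperInvariant(state[i]);
        });

    static void Main()
    {
        // Safe: "hello" itself is never modified, so references that were
        // deduped to it remain correct.
        Console.WriteLine(BuildUpper("hello")); // prints "HELLO"
    }
}
```

Code that instead pins and writes through an existing string's buffer (as in the `fixed` example above) would invalidate both the installed hash and any references already redirected to a canonical copy.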
diff --git a/docs/design/features/additional-deps.md b/docs/design/features/additional-deps.md
index b7046ea7067ca..aee11a4b85903 100644
--- a/docs/design/features/additional-deps.md
+++ b/docs/design/features/additional-deps.md
@@ -80,8 +80,8 @@ Where "found" means the version that is being used at run time including roll-fo
## 2.1 proposal (add an "any" tfm to store)
For example, `\dotnet\store\x64\any\microsoft.applicationinsights\2.4.0`
-
-The `any` tfm would be used if the specified tfm (e.g. netcoreapp2.0) is not found:
+
+The `any` tfm would be used if the specified tfm (e.g. netcoreapp2.0) is not found:
`\dotnet\store\x64\netcoreapp2.0\microsoft.applicationinsights\2.4.0`

_Possible risk: doesn't this make "uninstall" more difficult? Because multiple installs may write the same packages and try to remove packages that another installer created?_

@@ -95,7 +95,7 @@ The current ordering for resolving deps files is:
1) The app's deps file
2) The additional-deps file(s)
3) The framework(s) deps file(s)
-
+
The order is important because "first-in" wins. Since the additional-deps is before the framework, the additional-deps will "win" in all cases except during a minor\major roll-forward. The reason minor\major roll-forward is different is because the framework has special logic (new in 2.1) to compare assembly and file version numbers from the deps files, and pick the newest.

The proposed ordering change for 2.1 is:

@@ -108,7 +108,7 @@ In addition, the additional-deps will always look for assembly and file version

## 2.1 proposal (add runtimeconfig knob to disable `%DOTNET_ADDITIONAL_DEPS%`)

Add an `additionalDepsLookup` option to the runtimeconfig with these values:
-
+
0) The `%DOTNET_ADDITIONAL_DEPS%` is not used
1) `DOTNET_ADDITIONAL_DEPS` is used (the default)

diff --git a/docs/design/features/code-versioning-profiler-breaking-changes.md b/docs/design/features/code-versioning-profiler-breaking-changes.md
index 7a7add6323b08..760623741cb67 100644
--- a/docs/design/features/code-versioning-profiler-breaking-changes.md
+++ b/docs/design/features/code-versioning-profiler-breaking-changes.md
@@ -26,4 +26,4 @@ Code versioning, and in particular its use for tiered compilation means that the

2. The timing of ReJITCompilationFinished has been adjusted to be slightly earlier (after the new code body is generated, but prior to updating the previous jitted code to modify control flow). This raises a slim possibility for a ReJIT error to be reported after ReJITCompilationFinished in the case of OOM or process memory corruption.

-There are likely some other variations of the changed behavior I haven't thought of yet, but if further testing, code review, or discussion brings it to the surface I'll add it here. Feel free to get in touch on github (@noahfalk), or if you have anything you want to discuss in private you can email me at noahfalk AT microsoft.com
\ No newline at end of file
+There are likely some other variations of the changed behavior I haven't thought of yet, but if further testing, code review, or discussion brings it to the surface I'll add it here.
Feel free to get in touch on github (@noahfalk), or if you have anything you want to discuss in private you can email me at noahfalk AT microsoft.com
diff --git a/docs/design/features/covariant-return-methods.md b/docs/design/features/covariant-return-methods.md
index 3c9ac8c4db890..9416253ebf06f 100644
--- a/docs/design/features/covariant-return-methods.md
+++ b/docs/design/features/covariant-return-methods.md
@@ -2,7 +2,7 @@

Covariant return methods is a runtime feature designed to support the [covariant return types](https://github.com/dotnet/csharplang/blob/master/proposals/covariant-returns.md) and [records](https://github.com/dotnet/csharplang/blob/master/proposals/records.md) C# language features proposed for C# 9.0.

-This feature allows an overriding method to have a return type that is different than the one on the method it overrides, but compatible with it. The type compatibility rules are defined in ECMA I.8.7.1. Example: using a more derived return type.
+This feature allows an overriding method to have a return type that is different than the one on the method it overrides, but compatible with it. The type compatibility rules are defined in ECMA I.8.7.1. Example: using a more derived return type.

Covariant return methods can only be described through MethodImpl records, and as an initial implementation will only be applicable to methods on reference types. Methods on interfaces and value types will not be supported (may be supported later in the future).

@@ -24,9 +24,9 @@ During enumeration of MethodImpls on a type (`MethodTableBuilder::EnumerateMetho
+ Load the `TypeHandle` of the return type of the method on base type.
+ Load the `TypeHandle` of the return type of the method on the current type being validated.
+ Verify that the second `TypeHandle` is compatible with the first `TypeHandle` using the `MethodTable::CanCastTo()` API. If they are not compatible, a TypeLoadException is thrown.
-
+
The only exception where `CanCastTo()` will return true for an incompatible type according to the ECMA rules is for structs implementing interfaces, so we explicitly check for that case and throw a TypeLoadException if we hit it.
-
+
Once a method is flagged for return type checking, every time the vtable slot containing that method gets overridden on a derived type, the new override will also be checked for compatibility. This is to ensure that no derived type can implicitly override some virtual method that has already been overridden by some MethodImpl with a covariant return type.

### VTable Slot Unification

@@ -64,7 +64,7 @@ This slot unification step will also take place during the last step of type loa

An interface method may be both non-final and have a MethodImpl that declares that it overrides another interface method. If it does, NO other interface method may .override it. Instead further overrides must override the method that it overrode. Also the overriding method may only override 1 method.

The default interface method resolution algorithm shall change from:
-
+
``` console
Given interface method M and type T.
Let MSearch = M diff --git a/docs/design/features/crossgen2-compilation-structure-enhancements.md b/docs/design/features/crossgen2-compilation-structure-enhancements.md index bf164d89fd0b0..7f993df95e884 100644 --- a/docs/design/features/crossgen2-compilation-structure-enhancements.md +++ b/docs/design/features/crossgen2-compilation-structure-enhancements.md @@ -59,7 +59,7 @@ Note, this approach is probably more complete than we will finish in one release For non-generic code this is straightforward. Either compile all the non-generic code in the binary, or compile only that which is specified via a profile guided optimization step. This choice shall be driven by a per "input assembly" switch as in the presence of a composite R2R image we likely will want to have different policy for different assemblies, as has proven valuable in the past. Until proven otherwise, per assembly specification of this behavior shall be considered to be sufficient. -We shall set a guideline for how much generic code to generate, and the amount of generic code to generate shall be gated as a multiplier of the amount of non-generic code generated. +We shall set a guideline for how much generic code to generate, and the amount of generic code to generate shall be gated as a multiplier of the amount of non-generic code generated. For generic code we also need a per assembly switch to adjust between various behaviors, but the proposal is as follows: @@ -87,7 +87,7 @@ Runtime Layer Each layer in this stack will be compiled as a consistent set of crossgen2 compilations. -I propose to reduce the generics duplication problem to allow duplication between layers, but not within a layer. There are two ways to do this. The first of which is to produce composite R2R images for a layer. Within a single composite R2R image generation, running heuristics and generating generics eagerly should be straightforward. This composite R2R image would have all instantiations statically computed that are local to that particular layer of compilation, and also any instantiations from other layers. The duplication problem would be reduced in that a single analysis would trigger these multi-layer dependent compilations, and so which there may be duplication between layers, there wouldn't be duplication within a layer. And given that the count of layers is not expected to exceed 3 or 4, that duplication will not be a major concern. +I propose to reduce the generics duplication problem to allow duplication between layers, but not within a layer. There are two ways to do this. The first of which is to produce composite R2R images for a layer. Within a single composite R2R image generation, running heuristics and generating generics eagerly should be straightforward. This composite R2R image would have all instantiations statically computed that are local to that particular layer of compilation, and also any instantiations from other layers. The duplication problem would be reduced in that a single analysis would trigger these multi-layer dependent compilations, and so which there may be duplication between layers, there wouldn't be duplication within a layer. And given that the count of layers is not expected to exceed 3 or 4, that duplication will not be a major concern. 
The second approach is to split compilation up into assembly level units, run the heuristics per assembly, generate the completely local generics in the individual assemblies, and then nominate a final mop up assembly that consumes a series of data files produced by the individual assembly compilations and holds all of the stuff that didn't make sense in the individual assemblies. In my opinion this second approach would be better for debug builds, but the first approach is strictly better for release builds, and really shouldn't be terribly slow. diff --git a/docs/design/features/event-counter.md b/docs/design/features/event-counter.md index 29fa231f03ae2..935a42b47f86f 100644 --- a/docs/design/features/event-counter.md +++ b/docs/design/features/event-counter.md @@ -108,4 +108,4 @@ For EventCounter and PollingCounter we expect simple viewers to use the display ### Metadata -To add any optional metadata about the counters that we do not already provide a way of encoding, users can call the `AddMetaData(string key, string value)` API. This API exists on all variants of the Counter APIs, and allows users to add one or many key-value pairs of metadata, which is dumped to the Payload as a comma-separated string value. This API exists so that users can add any metadata about their Counter that is not known to us and is different from the ones we provide by default (i.e. `DisplayName`, `CounterType`, `DisplayRateTimeScale`). \ No newline at end of file +To add any optional metadata about the counters that we do not already provide a way of encoding, users can call the `AddMetaData(string key, string value)` API. This API exists on all variants of the Counter APIs, and allows users to add one or many key-value pairs of metadata, which is dumped to the Payload as a comma-separated string value. This API exists so that users can add any metadata about their Counter that is not known to us and is different from the ones we provide by default (i.e. `DisplayName`, `CounterType`, `DisplayRateTimeScale`). diff --git a/docs/design/features/framework-version-resolution.md b/docs/design/features/framework-version-resolution.md index cd20470ce34f1..179db366aa41c 100644 --- a/docs/design/features/framework-version-resolution.md +++ b/docs/design/features/framework-version-resolution.md @@ -30,7 +30,7 @@ In the `.runtimeconfig.json` these values are defined like this: ``` #### Framework name -Each framework reference identifies the framework by its name. +Each framework reference identifies the framework by its name. Framework names are case sensitive (since they're used as folder names even on Linux systems). #### Version @@ -147,12 +147,12 @@ Pros Cons * Testing behavior of new releases with pre-release versions is not fully possible (see below). * Some special cases don't work: -One special case which would not work: -*Component A which asks for `2.0.0 LatestMajor` is loaded first on a machine which has `3.0.0` and also `3.1.0-preview` installed. Because it's the first in the process it will resolve the runtime according to the above rules - that is prefer release version - and thus will select `3.0.0`. -Later on component B is loaded which asks for `3.1.0-preview LatestMajor` (for example the one in active development). This load will fail since `3.0.0` is not enough to run this component. +One special case which would not work: +*Component A which asks for `2.0.0 LatestMajor` is loaded first on a machine which has `3.0.0` and also `3.1.0-preview` installed. 
Because it's the first in the process it will resolve the runtime according to the above rules - that is prefer release version - and thus will select `3.0.0`. +Later on component B is loaded which asks for `3.1.0-preview LatestMajor` (for example the one in active development). This load will fail since `3.0.0` is not enough to run this component. Loading the components in reverse order (B first and then A) will work since the `3.1.0-preview` runtime will be selected.* -Modification to automatic roll forward to latest patch: +Modification to automatic roll forward to latest patch: Existing behavior is to find a matching framework based on the above rules and then apply roll forward to latest patch (except if `Disable` is specified). The new behavior should be: * If the above rules find a matching pre-release version of a framework, then automatic roll forward to latest patch is not applied. * If the above rules find a matching release version of a framework, automatic roll forward to latest patch is applied. @@ -218,12 +218,12 @@ Items lower in the list override those higher in the list. At each precedence sc This setting is also described in [roll-forward-on-no-candidate-fx](roll-forward-on-no-candidate-fx.md). It can be specified as a property either for the entire `.runtimeconfig.json` or per framework reference (it has no environment variable of command line argument). It disables rolling forward to the latest patch. The host will compute effective value of `applyPatches` for each framework reference. -The `applyPatches` value is only considered if the effective `rollForward` value for a given framework reference is +The `applyPatches` value is only considered if the effective `rollForward` value for a given framework reference is * `LatestPatch` * `Minor` * `Major` -For the other values `applyPatches` is ignored. +For the other values `applyPatches` is ignored. *This is to maintain backward compatibility with `rollForwardOnNoCandidateFx`. `applyPatches` is now considered obsolete.* If `applyPatches` is set to `true` (the default), then the roll-forward rules described above apply fully. @@ -259,7 +259,7 @@ There's a direct mapping from the `rollForward` setting to the internal represen | `rollForward` | `version_compatibility_range` | `roll_to_highest_version` | | --------------------- | ----------------------------- | ------------------------------------------ | | `Disable` | `exact` | `false` | -| `LatestPatch` | `patch` | `false` (always picks latest patch anyway) | +| `LatestPatch` | `patch` | `false` (always picks latest patch anyway) | | `Minor` | `minor` | `false` | | `LatestMinor` | `minor` | `true` | | `Major` | `major` | `false` | @@ -306,7 +306,7 @@ Steps * By doing this for all `framework references` here, before the next loop, we minimize the number of re-try attempts. 4. For each `framework reference` in `config fx references`: 5. --> If the framework's `name` is not in `resolved frameworks` Then resolve the `framework reference` to the actual framework on disk: - * If the framework `name` already exists in the `effective fx references` reconcile the currently processed `framework reference` with the one from the `effective fx references` (see above for the algorithm). + * If the framework `name` already exists in the `effective fx references` reconcile the currently processed `framework reference` with the one from the `effective fx references` (see above for the algorithm). 
*Term "reconcile framework references" is used for this in the code, this used to be called "soft-roll-forward" as well.* * The reconciliation will always pick the higher `version` and will merge the `rollForward` and `applyPatches` settings. * The reconciliation may fail if it's not possible to roll forward from one `framework reference` to the other. @@ -368,7 +368,7 @@ This might be more of an issue for components (COM and such), which we will reco The above proposal will impact behavior of existing apps (because framework resolution is in `hostfxr` which is global on the machine for all frameworks). This is a description of the changes as they apply to apps using either default settings, `rollForwardOnNoCandidateFx` or `applyPatches`. ### Fixing ordering issues -In 2.* the algorithm had a bug in it which caused it to resolve different version depending solely on the order of framework references. Consider this example: +In 2.* the algorithm had a bug in it which caused it to resolve different version depending solely on the order of framework references. Consider this example: `Microsoft.NETCore.App` is available on the machine with versions `2.1.1` and `2.1.2`. diff --git a/docs/design/features/globalization-invariant-mode.md b/docs/design/features/globalization-invariant-mode.md index 3112ce7dd42a6..f3c4132fcc0df 100644 --- a/docs/design/features/globalization-invariant-mode.md +++ b/docs/design/features/globalization-invariant-mode.md @@ -1,5 +1,5 @@ # .NET Core Globalization Invariant Mode - + Author: [Tarek Mahmoud Sayed](https://github.com/tarekgh) The globalization invariant mode - new in .NET Core 2.0 - enables you to remove application dependencies on globalization data and [globalization behavior](https://docs.microsoft.com/en-us/dotnet/standard/globalization-localization/). This mode is an opt-in feature that provides more flexibility if you care more about reducing dependencies and the size of distribution than globalization functionality or globalization-correctness. @@ -17,7 +17,7 @@ The following scenarios are affected when the invariant mode is enabled. Their i - Time Zone display name on Linux ## Background - + Globalization rules and the data that represents those rules frequently change, often due to country-specific policy changes (for example, changes in currency symbol, sorting behavior or time zones). Developers expect globalization behavior to always be current and for their applications to adapt to new data over time. In order to keep up with those changes, .NET Core (and the .NET Framework, too) depends on the underlying OS to keep up with these changes. Relying on the underlying OS for globalization data has the following benefits: @@ -32,11 +32,11 @@ Globalization support has the following potential challenges for applications: * Installing/carrying the [ICU](http://icu-project.org) package on Linux (~28 MB). Note: On Linux, .NET Core relies on globalization data from ICU. For example, [.NET Core Linux Docker images](https://github.com/dotnet/dotnet-docker/blob/master/2.0/runtime-deps/stretch/amd64/Dockerfile) install this component. Globalization data is available on Windows and macOS as part of their base installs. - + ## Cultures and culture data - -When enabling the invariant mode, all cultures behave like the invariant culture. The invariant culture has the following characteristics: - + +When enabling the invariant mode, all cultures behave like the invariant culture. 
The invariant culture has the following characteristics: + * Culture names (English, native display, ISO, language names) will return invariant names. For instance, when requesting culture native name, you will get "Invariant Language (Invariant Country)". * All cultures LCID will have value 0x1000 (which means Custom Locale ID). The exception is the invariant cultures which will still have 0x7F. * All culture parents will be invariant. In other word, there will not be any neutral cultures by default but the apps can still create a culture like "en". @@ -45,45 +45,45 @@ When enabling the invariant mode, all cultures behave like the invariant culture * Numbers will always be formatted as the invariant culture. For example, decimal point will always be formatted as ".". Number strings previously formatted with cultures that have different symbols will fail parsing. * All cultures will have currency symbol as "¤" * Culture enumeration will always return a list with one culture which is the invariant culture. - + ## String casing - + String casing (ToUpper and ToLower) will be performed for the ASCII range only. Requests to case code points outside that range will not be performed, however no exception will be thrown. In other words, casing will only be performed for character range ['a'..'z']. - + Turkish I casing will not be supported when using Turkish cultures. - + ## String sorting and searching String operations like [Compare](https://docs.microsoft.com/dotnet/api/?term=string.compare), [IndexOf](https://docs.microsoft.com/dotnet/api/?term=string.indexof) and [LastIndexOf](https://docs.microsoft.com/dotnet/api/?term=string.lastindexof) are always performed as [ordinal](https://en.wikipedia.org/wiki/Ordinal_number) and not linguistic operations regardless of the string comparing options passed to the APIs. - + The [ignore case](https://docs.microsoft.com/dotnet/api/system.globalization.compareoptions.ignorecase) string sorting option is supported but only for the ASCII range as mentioned previously. - + For example, the following comparison will resolve to being unequal: * 'i', compared to * Turkish I '\u0130', given -* Turkish culture, using +* Turkish culture, using * CompareOptions.Ignorecase However, the following comparison will resolve to being equal: * 'i', compared to -* 'I', using +* 'I', using * CompareOptions.Ignorecase - + It is worth noticing that all other [sort comparison options](https://docs.microsoft.com/dotnet/api/system.globalization.compareoptions) (for example, ignore symbols, ignore space, Katakana, Hiragana) will have no effect in the invariant mode (they are ignored). - + ## Sort keys - + Sort keys are used mostly when indexing some data (for example, database indexing). When generating sort keys of 2 strings and comparing the sort keys the results should hold the exact same results as if comparing the original 2 strings. In the invariant mode, sort keys will be generated according to ordinal comparison while respecting ignore casing options. - + ## String normalization - -String normalization normalizes a string into some form (for example, composed, decomposed forms). Normalization data is required to perform these operations, which isn't available in invariant mode. In this mode, all strings are considered as already normalized, per the following behavior: -* If the app requested to normalize any string, the original string is returned without modification. +String normalization normalizes a string into some form (for example, composed, decomposed forms). 
Normalization data is required to perform these operations, which isn't available in invariant mode. In this mode, all strings are considered as already normalized, per the following behavior: + +* If the app requested to normalize any string, the original string is returned without modification. * If the app asked if any string is normalized, the return value will always be `true`. - + ## Internationalized Domain Names (IDN) support [Internationalized Domain Names](https://en.wikipedia.org/wiki/Internationalized_domain_name) require globalization data to perform conversion to ASCII or Unicode forms, which isn't available in the invariant mode. In this mode, IDN functionality has the following behavior: @@ -91,13 +91,13 @@ String normalization normalizes a string into some form (for example, composed, * IDN support doesn't conform to the latest standard. * IDN support will be incorrect if the input IDN string is not normalized since normalization is not supported in invariant mode. * Some basic IDN strings will still produce correct values. - + ## Time zone display name in Linux - + When running on Linux, ICU is used to get the time zone display name. In invariant mode, the standard time zone names are returned instead. - + ## Enabling the invariant mode - + Applications can enable the invariant mode by either of the following: 1. in project file: @@ -119,13 +119,13 @@ Applications can enable the invariant mode by either of the following: } } ``` - + 3. setting environment variable value `DOTNET_SYSTEM_GLOBALIZATION_INVARIANT` to `true` or `1`. Note: value set in project file or `runtimeconfig.json` has higher priority than the environment variable. ## APP behavior with and without the invariant config switch - + - If the invariant config switch is not set or it is set false - The framework will depend on the OS for the globalization support. - On Linux, if the ICU package is not installed, the application will fail to start. diff --git a/docs/design/features/host-component-dependencies-resolution.md b/docs/design/features/host-component-dependencies-resolution.md index 094f3d082e944..f107961714e56 100644 --- a/docs/design/features/host-component-dependencies-resolution.md +++ b/docs/design/features/host-component-dependencies-resolution.md @@ -40,4 +40,4 @@ This feature certainly provides a somewhat duplicate functionality to the existi * Currently we don't consider frameworks for the app when computing probing paths for resolving assets from the component's `.deps.json`. This is a different behavior from the app startup where these are considered. Is it important - needed? * Add ability to corelate tracing with the runtime - probably some kind of activity ID * Handling of native assets - currently returning just probing paths. Would be cleaner to return full resolved paths. But we would have to keep some probing paths. In the case of missing `.deps.json` the native library should be looked for in the component directory - thus requires probing - we can't figure out which of the files in the folder are native libraries in the hosts. -* Handling of satellite assemblies (resource assets) - currently returning just probing paths which exclude the culture. So from a resolved asset `./foo/en-us/resource.dll` we only take `./foo` as the probing path. Consider using full paths instead - probably would require more parsing as we would have to be able to figure out the culture ID somewhere to build the true map AssemblyName->path in the managed class. 
Just like for native assets, if there's no `.deps.json` the only possible solution is to use probing, so the probing semantics would have to be supported anyway. \ No newline at end of file +* Handling of satellite assemblies (resource assets) - currently returning just probing paths which exclude the culture. So from a resolved asset `./foo/en-us/resource.dll` we only take `./foo` as the probing path. Consider using full paths instead - probably would require more parsing as we would have to be able to figure out the culture ID somewhere to build the true map AssemblyName->path in the managed class. Just like for native assets, if there's no `.deps.json` the only possible solution is to use probing, so the probing semantics would have to be supported anyway. diff --git a/docs/design/features/host-error-codes.md b/docs/design/features/host-error-codes.md index 941c2a92a0013..9e246409011fa 100644 --- a/docs/design/features/host-error-codes.md +++ b/docs/design/features/host-error-codes.md @@ -7,7 +7,7 @@ Note that the exit code returned by running an application via `dotnet.exe` or ` * `Success` (`0`) - Operation was successful. -* `Success_HostAlreadyInitialized` (`0x00000001`) - Initialization was successful, but another host context is already initialized, so the returned context is "secondary". The requested context was otherwise fully compatible with the already initialized context. +* `Success_HostAlreadyInitialized` (`0x00000001`) - Initialization was successful, but another host context is already initialized, so the returned context is "secondary". The requested context was otherwise fully compatible with the already initialized context. This is returned by `hostfxr_initialize_for_runtime_config` if it's called when the host is already initialized in the process. Comes from `corehost_initialize` in `hostpolicy`. * `Success_DifferentRuntimeProperties` (`0x00000002`) - Initialization was successful, but another host context is already initialized and the requested context specified some runtime properties which are not the same (either in value or in presence) to the already initialized context. @@ -18,14 +18,14 @@ This is returned by `hostfxr_initialize_for_runtime_config` if it's called when * `InvalidArgFailure` (`0x80008081`) - One of the specified arguments for the operation is invalid. -* `CoreHostLibLoadFailure` (`0x80008082`) - There was a failure loading a dependent library. If any of the hosting components calls `LoadLibrary`/`dlopen` on a dependent library and the call fails, this error code is returned. The most common case for this failure is if the dependent library is missing some of its dependencies (for example the necessary CRT is missing on the machine), likely corrupt or incomplete install. +* `CoreHostLibLoadFailure` (`0x80008082`) - There was a failure loading a dependent library. If any of the hosting components calls `LoadLibrary`/`dlopen` on a dependent library and the call fails, this error code is returned. The most common case for this failure is if the dependent library is missing some of its dependencies (for example the necessary CRT is missing on the machine), likely corrupt or incomplete install. This error code is also returned from `corehost_resolve_component_dependencies` if it's called on a `hostpolicy` which has not been initialized via the hosting layer. This would typically happen if `coreclr` is loaded directly without the hosting layer and then `AssemblyDependencyResolver` is used (which is an unsupported scenario). 
* `CoreHostLibMissingFailure` (`0x80008083`) - One of the dependent libraries is missing. Typically when the `hostfxr`, `hostpolicy` or `coreclr` dynamic libraries are not present in the expected locations. Probably means corrupted or incomplete installation. * `CoreHostEntryPointFailure` (`0x80008084`) - One of the dependent libraries is missing a required entry point. -* `CoreHostCurHostFindFailure` (`0x80008085`) - If the hosting component is trying to use the path to the current module (the hosting component itself) and from it deduce the location of the installation. Either the location of the current module could not be determined (some weird OS call failure) or the location is not in the right place relative to other expected components. +* `CoreHostCurHostFindFailure` (`0x80008085`) - If the hosting component is trying to use the path to the current module (the hosting component itself) and from it deduce the location of the installation. Either the location of the current module could not be determined (some weird OS call failure) or the location is not in the right place relative to other expected components. For example the `hostfxr` may look at its location and try to deduce the location of the `shared` folder with the framework from it. It assumes the typical install layout on disk. If this doesn't work, this error will be returned. * `CoreClrResolveFailure` (`0x80008087`) - If the `coreclr` library could not be found. The hosting layer (`hostpolicy`) looks for `coreclr` library either next to the app itself (for self-contained) or in the root framework (for framework-dependent). This search can be done purely by looking at disk or more commonly by looking into the respective `.deps.json`. If the `coreclr` library is missing in `.deps.json` or it's there but doesn't exist on disk, this error is returned. @@ -61,7 +61,7 @@ For example the `hostfxr` may look at its location and try to deduce the locatio * Other inconsistencies (for example `rollForward` and `applyPatches` are not allowed to be specified in the same config file) * Any of the above failures reading the `.runtimecofig.dev.json` file * Self-contained `.runtimeconfig.json` used in `hostfxr_initialize_for_runtime_config` -Note that missing `.runtimconfig.json` is not an error (means self-contained app). +Note that missing `.runtimconfig.json` is not an error (means self-contained app). This error code is also used when there is a problem reading the CLSID map file in `comhost`. * `AppArgNotRunnable` (`0x80008094`) - Used internally when the command line for `dotnet.exe` doesn't contain path to the application to run. In such case the command line is considered to be a CLI/SDK command. This error code should never be returned to external caller. @@ -70,8 +70,8 @@ This error code is also used when there is a problem reading the CLSID map file * The `apphost` binary has not been imprinted with the path to the app to run (so freshly built `apphost.exe` from the branch will fail to run like this) * The `apphost` is a bundle (single-file exe) and it failed to extract correctly. -* `FrameworkMissingFailure` (`0x80008096`) - It was not possible to find a compatible framework version. This originates in `hostfxr` (`resolve_framework_reference`) and means that the app specified a reference to a framework in its `.runtimeconfig.json` which could not be resolved. 
The failure to resolve can mean that no such framework is available on the disk, or that the available frameworks don't match the minimum version specified or that the roll forward options specified excluded all available frameworks. -Typically this would be used if a 3.0 app is trying to run on a machine which has no 3.0 installed. +* `FrameworkMissingFailure` (`0x80008096`) - It was not possible to find a compatible framework version. This originates in `hostfxr` (`resolve_framework_reference`) and means that the app specified a reference to a framework in its `.runtimeconfig.json` which could not be resolved. The failure to resolve can mean that no such framework is available on the disk, or that the available frameworks don't match the minimum version specified or that the roll forward options specified excluded all available frameworks. +Typically this would be used if a 3.0 app is trying to run on a machine which has no 3.0 installed. It would also be used for example if a 32bit 3.0 app is running on a machine which has 3.0 installed but only for 64bit. * `HostApiFailed` (`0x80008097`) - Returned by `hostfxr_get_native_search_directories` if the `hostpolicy` could not calculate the `NATIVE_DLL_SEARCH_DIRECTORIES`. @@ -87,7 +87,7 @@ It would also be used for example if a 32bit 3.0 app is running on a machine whi * `SdkResolverResolveFailure` (`0x8000809b`) - Returned from `hostfxr_resolve_sdk2` when it fails to find matching SDK. Similar to `LibHostSdkFindFailure` but only used in the `hostfxr_resolve_sdk2`. -* `FrameworkCompatFailure` (`0x8000809c`) - During processing of `.runtimeconfig.json` there were two framework references to the same framework which were not compatible. This can happen if the app specified a framework reference to a lower-level framework which is also specified by a higher-level framework which is also used by the app. +* `FrameworkCompatFailure` (`0x8000809c`) - During processing of `.runtimeconfig.json` there were two framework references to the same framework which were not compatible. This can happen if the app specified a framework reference to a lower-level framework which is also specified by a higher-level framework which is also used by the app. For example, this would happen if the app referenced `Microsoft.AspNet.App` version 2.0 and `Microsoft.NETCore.App` version 3.0. In such case the `Microsoft.AspNet.App` has `.runtimeconfig.json` which also references `Microsoft.NETCore.App` but it only allows versions 2.0 up to 2.9 (via roll forward options). So the version 3.0 requested by the app is incompatible. * `FrameworkCompatRetry` (`0x8000809d`) - Error used internally if the processing of framework references from `.runtimeconfig.json` reached a point where it needs to reprocess another already processed framework reference. If this error is returned to the external caller, it would mean there's a bug in the framework resolution algorithm. @@ -100,10 +100,10 @@ For example, this would happen if the app referenced `Microsoft.AspNet.App` vers * `LibHostDuplicateProperty` (`0x800080a1`) - The `.runtimeconfig.json` specified by the app contains a runtime property which is also produced by the hosting layer. For example if the `.runtimeconfig.json` would specify a property `TRUSTED_PLATFORM_ROOTS`, this error code would be returned. It is not allowed to specify properties which are otherwise populated by the hosting layer (`hostpolicy`) as there is not good way to resolve such conflicts. 
-* `HostApiUnsupportedVersion` (`0x800080a2`) - Feature which requires certain version of the hosting layer binaries was used on a version which doesn't support it. +* `HostApiUnsupportedVersion` (`0x800080a2`) - Feature which requires certain version of the hosting layer binaries was used on a version which doesn't support it. For example if COM component specified to run on 2.0 `Microsoft.NETCore.App` - as that contains older version of `hostpolicy` which doesn't support the necessary features to provide COM services. -* `HostInvalidState` (`0x800080a3`) - Error code returned by the hosting APIs in `hostfxr` if the current state is incompatible with the requested operation. There are many such cases, please refer to the documentation of the hosting APIs for details. +* `HostInvalidState` (`0x800080a3`) - Error code returned by the hosting APIs in `hostfxr` if the current state is incompatible with the requested operation. There are many such cases, please refer to the documentation of the hosting APIs for details. For example if `hostfxr_get_runtime_property_value` is called with the `host_context_handle` `nullptr` (meaning get property from the active runtime) but there's no active runtime in the process. * `HostPropertyNotFound` (`0x800080a4`) - property requested by `hostfxr_get_runtime_property_value` doesn't exist. diff --git a/docs/design/features/host-probing.md b/docs/design/features/host-probing.md index 9883c676ca085..38df2347406b8 100644 --- a/docs/design/features/host-probing.md +++ b/docs/design/features/host-probing.md @@ -25,29 +25,29 @@ The dotnet host uses probing when it searches for actual file on disk for a give The library relative path in this case is `newtonsoft.json/11.0.2` and the asset relative path is `lib/netstandard2.0/Newtonsoft.Json.dll`. So the goal of the probing logic is to find the `Newtonsoft.Json.dll` file using the above relative paths. ## Probing -The probing itself is done by going over a list of probing paths, which are ordered according to their priority. For each path, the host will append the relative parts of the path as per above and see if the file actually exists on the disk. -If the file is found, the probing is done, and the full path just resolved is stored. +The probing itself is done by going over a list of probing paths, which are ordered according to their priority. For each path, the host will append the relative parts of the path as per above and see if the file actually exists on the disk. +If the file is found, the probing is done, and the full path just resolved is stored. If the file is not found, the probing continues with the next path on the list. If all paths are tried and the asset is still not found this is reported as an error (with the exception of app's `.deps.json` asset, in which case it's ignored). ## Probing paths The list of probing paths ordered according to their priority. First path in the list below is tried first and so on. -* Servicing paths - Servicing paths are only used for serviceable assets, that is the corresponding library record must specify `serviceable: true`. +* Servicing paths + Servicing paths are only used for serviceable assets, that is the corresponding library record must specify `serviceable: true`. 
The base servicing path is * On Windows x64 `%ProgramFiles(x86)%\coreservicing` * On Windows x86 `%ProgramFiles%\coreservicing` - * Otherwise (Linux/Mac) `$CORE_SERVICING` + * Otherwise (Linux/Mac) `$CORE_SERVICING` Given the base servicing path, the probing paths are * Servicing NI probe path `/|arch|` - this is used only for `runtime` assets * Servicing normal probe path `/pkgs` - this is used for all assets * The application (or framework if we're resolving framework assets) directory -* Framework directories - If the app (or framework) has dependencies on frameworks, these frameworks are used as probing paths. - The order is from the higher level framework to lower level framework. The app is considered the highest level, it direct dependencies are next and so on. - For assets from frameworks, only that framework and lower level frameworks are considered. +* Framework directories + If the app (or framework) has dependencies on frameworks, these frameworks are used as probing paths. + The order is from the higher level framework to lower level framework. The app is considered the highest level, it direct dependencies are next and so on. + For assets from frameworks, only that framework and lower level frameworks are considered. Note: These directories come directly out of the framework resolution process. Special note on Windows where global locations are always considered even if the app is not executed via the shared `dotnet.exe`. More details can be found in [Multi-level Shared FX Lookup](multilevel-sharedfx-lookup.md). * Shared store paths * `$DOTNET_SHARED_STORE/|arch|/|tfm|` - The environment variable `DOTNET_SHARED_STORE` can contain multiple paths, in which case each is appended with `|arch|/|tfm|` and used as a probing path. @@ -56,10 +56,10 @@ The list of probing paths ordered according to their priority. First path in the * On Windows, the global shared store is used * If running in WOW64 mode - `%ProgramFiles(x86)%\dotnet\store\|arch|\|tfm|` * Otherwise - `%ProgramFiles%\dotnet\store\|arch|\|tfm|` -* Additional probing paths +* Additional probing paths In these paths the `|arch|/|tfm|` string can be used and will be replaced with the actual values before using the path. * `--additionalprobingpath` command line arguments * `additionalProbingPaths` specified in `.runtimeconfig.json` and `.runtimeconfig.dev.json` for the app and each framework (highest to lowest) - Note about framework-dependent and self-contained apps. With regard to probing the main difference is that self-contained apps don't have any framework dependencies, so all assets (including assemblies which normally come from a framework) are probed for in the app's directory. \ No newline at end of file + Note about framework-dependent and self-contained apps. With regard to probing the main difference is that self-contained apps don't have any framework dependencies, so all assets (including assemblies which normally come from a framework) are probed for in the app's directory. diff --git a/docs/design/features/host-startup-hook.md b/docs/design/features/host-startup-hook.md index b5c29871a282a..e2b4623fab038 100644 --- a/docs/design/features/host-startup-hook.md +++ b/docs/design/features/host-startup-hook.md @@ -57,8 +57,8 @@ Each part may be either * the assembly name must be considered a valid assembly name as specified by the `AssemblyName` class. -Note that white-spaces are preserved and considered part of the specified -path/name. 
So for example path separator followed by a white-space and +Note that white-spaces are preserved and considered part of the specified +path/name. So for example path separator followed by a white-space and another path separator is invalid, since the white-space only string in between the path separators will be considered as assembly name. @@ -90,10 +90,10 @@ centralized, while still allowing user code to do its own thing if it so desires. The producer of `StartupHook.dll` needs to ensure that -`StartupHook.dll` is compatible with the dependencies specified in the -main application's deps.json, since those dependencies are put on the -Trusted Platform Assemblies (TPA) list during the runtime startup, -before `StartupHook.dll` is loaded. This means that `StartupHook.dll` +`StartupHook.dll` is compatible with the dependencies specified in the +main application's deps.json, since those dependencies are put on the +Trusted Platform Assemblies (TPA) list during the runtime startup, +before `StartupHook.dll` is loaded. This means that `StartupHook.dll` needs to be built against the same or lower version of .NET Core than the app. ## Example diff --git a/docs/design/features/host-tracing.md b/docs/design/features/host-tracing.md index be60e9e5db2c6..78f4b3ca4e759 100644 --- a/docs/design/features/host-tracing.md +++ b/docs/design/features/host-tracing.md @@ -1,6 +1,6 @@ # Host tracing -The various .NET Core host components provide detailed tracing of diagnostic information which can help solve issues around runtime, framework and assembly resolution and others. +The various .NET Core host components provide detailed tracing of diagnostic information which can help solve issues around runtime, framework and assembly resolution and others. ## Existing support Currently (as of .NET Core 2.1) the host tracing is only written to the `stderr` output of the process. It can be turned on by setting `COREHOST_TRACE=1`. @@ -47,13 +47,13 @@ The functions behave exactly the same in both components. The `listener` paramet * a pointer to an implementation of `host_trace_listener` which is then registered the only listener for all tracing. * `NULL` value which unregisters any previously registered listener. After this call tracing is disabled. -Custom host can and should register the trace listener as the first thing it does with the respective host component to ensure that all tracing is routed to it. +Custom host can and should register the trace listener as the first thing it does with the respective host component to ensure that all tracing is routed to it. -Only one trace listener can be registered at any given time. +Only one trace listener can be registered at any given time. Registering custom trace listener or setting it to `NULL` doesn't override the tracing enabled by environment variables. If a trace listener is registered and the `COREHOST_TRACE=1` is set as well, the traces will be routed to both the `stderr` as well as the registered listener. -The `hostfxr` component will propagate the trace listener to the `hostpolicy` component before it calls into it. So custom host only needs to register its trace listener with the `hostfxr` component and not both. The propagation of the trace listener is only done for the duration necessary after which it will be unregistered again. So custom host might need to register its own listener if it makes calls directly to `hostpolicy` on top of the calls to `hostfxr`. 
+The `hostfxr` component will propagate the trace listener to the `hostpolicy` component before it calls into it. So custom host only needs to register its trace listener with the `hostfxr` component and not both. The propagation of the trace listener is only done for the duration necessary after which it will be unregistered again. So custom host might need to register its own listener if it makes calls directly to `hostpolicy` on top of the calls to `hostfxr`. In case of new (.NET Core 3) `hostfxr` component which would call into an old (.NET Core 2.1) `hostpolicy` component, the `hostfxr` will not perform the propagation in any way since the older `hostpolicy` doesn't support this mechanism. The trace listener interface looks like this: @@ -68,14 +68,14 @@ struct host_trace_listener } ``` -The `message` parameter is a standard `NUL` terminated string and it's the message to trace with the respective verbosity level. +The `message` parameter is a standard `NUL` terminated string and it's the message to trace with the respective verbosity level. The `activityId` parameter is a standard `NUL` terminated string. It's used to correlate traces for a given binding event. The content of the string is not yet defined, but the trace listeners should consider it opaque. Trace listeners should include this string in the trace of the message in some form. The parameter may be `NULL` in which case the trace doesn't really belong to any specific binding event. Methods on the trace listener interface can be called from any thread in the app, and should be able to handle multiple calls at the same time from different threads. ## Future investments ### Trace content -Currently the host components tend to trace a lot. The trace contains lot of interesting information but it's done in a very verbose way which is sometimes hard to navigate. Future investment should look at the common scenarios which are using the host tracing and optimize the trace output for those scenarios. This doesn't necessarily mean decrease the amount of tracing, but possibly introduce "summary sections" which would describe the end result decisions for certain scenarios. +Currently the host components tend to trace a lot. The trace contains lot of interesting information but it's done in a very verbose way which is sometimes hard to navigate. Future investment should look at the common scenarios which are using the host tracing and optimize the trace output for those scenarios. This doesn't necessarily mean decrease the amount of tracing, but possibly introduce "summary sections" which would describe the end result decisions for certain scenarios. It would also be good to review the usage of verbose versus info tracing and make it consistent. ### Interaction with other diagnostics in the .NET Core diff --git a/docs/design/features/raw-eventlistener.md b/docs/design/features/raw-eventlistener.md index f13b6a6096e01..aaf562ff3b72a 100644 --- a/docs/design/features/raw-eventlistener.md +++ b/docs/design/features/raw-eventlistener.md @@ -23,7 +23,7 @@ public enum EventListenerSettings None, RawEventDispatch } -``` +``` This parameter is used to specify the desired dispatch behavior (in this case, do not deserialize event payloads). 
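To make the proposed dispatch model concrete, here is a minimal sketch of a consumer. `EventListenerSettings`, `OnEventWrittenRaw`, and `RawEventWrittenEventArgs` are the shapes proposed in this document, not shipped APIs; the constructor overload, the virtual-ness of `OnEventWrittenRaw`, and the `byte` element type of `Payload` are assumptions:

```csharp
using System;
using System.Diagnostics.Tracing;

// Sketch only: EventListenerSettings, RawEventWrittenEventArgs and
// OnEventWrittenRaw are proposed in this document, not shipped APIs.
internal sealed class RawDispatchListener : EventListener
{
    // Assumption: a constructor overload accepts the new settings enum.
    public RawDispatchListener() : base(EventListenerSettings.RawEventDispatch)
    {
    }

    protected override void OnEventSourceCreated(EventSource eventSource) =>
        EnableEvents(eventSource, EventLevel.Informational);

    // Assumption: the raw dispatch method is virtual so listeners can override it.
    public override void OnEventWrittenRaw(RawEventWrittenEventArgs args)
    {
        // The payload arrives as undeserialized bytes; forward or decode it
        // without the per-event allocations of the classic Payload list.
        ReadOnlySpan<byte> payload = args.Payload;
        Console.WriteLine($"Level={args.Level}, thread={args.OSThreadId}: {payload.Length} payload bytes");
    }
}
```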
@@ -33,7 +33,7 @@ The new raw dispatch API will be: ``` public void OnEventWrittenRaw(RawEventWrittenEventArgs args); - + public sealed class RawEventWrittenEventArgs { @@ -52,9 +52,9 @@ public sealed class RawEventWrittenEventArgs public EventLevel Level { get; } public long OSThreadId { get; } public DateTime TimeStamp { get; } - + // Replacement properties for Payload and PayloadNames. - public ReadOnlySpan Metadata { get; } + public ReadOnlySpan Metadata { get; } public ReadOnlySpan Payload { get; } } ``` diff --git a/docs/design/features/readytorun-composite-format-design.md b/docs/design/features/readytorun-composite-format-design.md index b0fcf288874bf..afa7a4ed15745 100644 --- a/docs/design/features/readytorun-composite-format-design.md +++ b/docs/design/features/readytorun-composite-format-design.md @@ -50,7 +50,7 @@ we propose using two complementary strategies: * In the composite R2R file with embedded metadata, there must be a new table of COR headers and metadata blobs representing the MSIL metadata from all the input assemblies. The table -must be indexable by simple assembly name for fast lookup. +must be indexable by simple assembly name for fast lookup. * in contrast to managed assemblies and single-input R2R executables, composite R2R files don't expose any COR header (it's not meaningful as the file potentially contains a larger @@ -98,7 +98,7 @@ this encoding are still work in progress and likely to further evolve. version bubble is represented by an arbitrary mixture of single-input and composite R2R files. If that is the case, manifest metadata would need to be decoupled from the index to `READYTORUN_SECTION_ASSEMBLIES`. - + Alternatively we could make it such that `READYTORUN_SECTION_MANIFEST_METADATA` holds all component assemblies of the current composite image at the beginning of the AssemblyRef table followed by the other needed assemblies *within the version bubble outside of the current @@ -157,7 +157,7 @@ that could be subsequently opened by ILDASM or ILSpy. Ideally we should patch ILDASM / ILSpy to cleanly handle the composite R2R file format; sadly this may end up being a relatively complex change due to the presence of multiple MSIL metadata blocks in the -file. +file. # Required diagnostic changes diff --git a/docs/design/features/roll-forward-on-no-candidate-fx.md b/docs/design/features/roll-forward-on-no-candidate-fx.md index dbfc04b632cbe..7417498675d4d 100644 --- a/docs/design/features/roll-forward-on-no-candidate-fx.md +++ b/docs/design/features/roll-forward-on-no-candidate-fx.md @@ -11,19 +11,19 @@ If the version specified is a _production_ version, the default behavior is: ``` For instance: - + Desired version: 1.0.1 Available versions: 1.0.0, 1.0.1, 1.0.2, 1.0.3, 1.1.0, 1.1.1, 2.0.1 Chosen version: 1.0.3 - + Desired version: 1.0.1 Available versions: 1.0.0, 1.1.0-preview1-x, 1.1.0-preview2-x, 1.2.0-preview1-x Chosen version: 1.1.0-preview2-x - + Desired version: 1.0.1 Available versions: 1.0.0, 1.1.0-preview1-x, 1.2.0, 1.2.1-preview1-x Chosen version: 1.2.0 - + Desired version: 1.0.1 Available versions: 1.0.0, 2.0.0 Chosen version: there is no compatible version available @@ -40,14 +40,14 @@ This means _preview_ is never rolled forward to _production_. 
Desired version: 1.0.1-preview2-x Available versions: 1.0.1-preview2-x, 1.0.1-preview3-x Chosen version: 1.0.1-preview2-x - + Desired version: 1.0.1-preview2-x Available versions: 1.0.1-preview3-x Chosen version: 1.0.1-preview3-x - + Desired version: 1.0.1-preview2-x Available versions: 1.0.1, 1.0.2-preview3-x - Chosen version: there is no compatible version available + Chosen version: there is no compatible version available ## Settings to control behavior ### applyPatches @@ -59,7 +59,7 @@ Once a compatible framework version is chosen as explained above, the latest pat Desired version: 1.0.1 Available versions: 1.0.1, 1.0.2 Chosen version: 1.0.2 - + Patch roll forward: disabled Desired version: 1.0.1 Available versions: 1.0.1, 1.0.2 @@ -79,7 +79,7 @@ To specify the exact desired framework version, use the command line argument '- - Command line argument ('--roll-forward-on-no-candidate-fx' argument) - Runtime configuration file ('rollForwardOnNoCandidateFx' property) - DOTNET_ROLL_FORWARD_ON_NO_CANDIDATE_FX environment variable - + The valid values: 0) Off (_do not roll forward_) @@ -101,7 +101,7 @@ If this feature is enabled and no compatible framework version is found, we'll s Desired Version: 1.0.0 Available versions: 1.1.1, 1.1.3, 1.2.0 Chosen version: 1.1.1 - + Patch roll forward: enabled Roll Forward On No Candidate Fx: 0 (disabled) Desired Version: 1.0.0 @@ -111,7 +111,7 @@ If this feature is enabled and no compatible framework version is found, we'll s It's important to notice that, even if "Roll Forward On No Candidate Fx" is enabled, only the specified framework version will be accepted if the '--fx-version' argument is used. -Since there are three ways to specify the values, conflicts will be resolved by the order listed above (command line has priority over config, which has priority over the environment variable). +Since there are three ways to specify the values, conflicts will be resolved by the order listed above (command line has priority over config, which has priority over the environment variable). ``` For instance: @@ -123,8 +123,8 @@ Since there are three ways to specify the values, conflicts will be resolved by 'rollForwardOnNoCandidateFx' property is set to '1' DOTNET_ROLL_FORWARD_ON_NO_CANDIDATE_FX env var is set to '1' The feature is DISABLED. -``` - +``` + There is no inheritance when there are chained framework references. If the app references FX1, and FX1 references FX2, then the resolution of FX2 only takes into account settings from `.runtimeconfig.json` in FX1, CLI and env. variable. The settings in the app's `.runtimeconfig.json` have no effect on resolution of FX2. ## Multilevel SharedFx Lookup diff --git a/docs/design/features/source-generator-pinvokes.md b/docs/design/features/source-generator-pinvokes.md index 1efec51604bc4..fbd588950df9d 100644 --- a/docs/design/features/source-generator-pinvokes.md +++ b/docs/design/features/source-generator-pinvokes.md @@ -2,7 +2,7 @@ ## Purpose -The CLR possesses a rich built-in marshaling mechanism for interoperability with native code that is handled at runtime. This system was designed to free .NET developers from having to author complex and potentially ABI sensitive [type conversion code][typemarshal_link] from a managed to an unmanaged environment. The built-in system works with both [P/Invoke][pinvoke_link] (i.e. `DllImportAttribute`) and [COM interop](https://docs.microsoft.com/dotnet/standard/native-interop/cominterop). 
The generated portion is typically called an ["IL Stub"][il_stub_link] since the stub is generated by inserting IL instructions into a stream and then passing that stream to the JIT for compilation. 
+The CLR possesses a rich built-in marshaling mechanism for interoperability with native code that is handled at runtime. This system was designed to free .NET developers from having to author complex and potentially ABI sensitive [type conversion code][typemarshal_link] from a managed to an unmanaged environment. The built-in system works with both [P/Invoke][pinvoke_link] (i.e. `DllImportAttribute`) and [COM interop](https://docs.microsoft.com/dotnet/standard/native-interop/cominterop). The generated portion is typically called an ["IL Stub"][il_stub_link] since the stub is generated by inserting IL instructions into a stream and then passing that stream to the JIT for compilation.
 
 A consequence of this approach is that marshaling code is not immediately available post-link for AOT scenarios (e.g. [`crossgen`](../../workflow/building/coreclr/crossgen.md) and [`crossgen2`](crossgen2-compilation-structure-enhancements.md)). The immediate unavailability of this code has been mitigated by a complex mechanism to have marshalling code generated during AOT compilation. The [IL Linker][ilinker_link] is another tool that struggles with runtime generated code since it is unable to understand all potentially used types without seeing what is generated.
diff --git a/docs/design/features/tiered-compilation.md b/docs/design/features/tiered-compilation.md
index 242dbc94b3ac5..caedd79f94f14 100644
--- a/docs/design/features/tiered-compilation.md
+++ b/docs/design/features/tiered-compilation.md
@@ -92,4 +92,4 @@ Most of the implementation is relatively straightforward given the design and be
 
 1. The current call counter implementation is utterly naive and using the PreStub has a high per-invocation cost relative to other more sophisticated implementation options. We expected it would need to change sooner, but so far, despite having some measurable cost, it hasn't reached the top of the priority list for performance gain vs. work necessary. Part of what makes it not as bad as it looks is that there is a bound on the number of times it can be called for any one method, and relative to the typical 100,000 cycle cost of jitting a method, even an expensive call counter doesn't make a huge impact.
 
-2. Right now background compilation is limited to a single thread taken from the threadpool and used for up to 10ms. If we need more time than that we return the thread and request another. The goal is to be a good citizen in the threadpool's overall workload while still doing enough work in chunks that we get decent cache and thread quantum utilization. It's possible we could do better as the policy here hasn't been profiled much. Thus far we haven't profiled any performance issues that suggested we should be handling this differently. 
\ No newline at end of file
+2. Right now background compilation is limited to a single thread taken from the threadpool and used for up to 10ms. If we need more time than that, we return the thread and request another. The goal is to be a good citizen in the threadpool's overall workload while still doing enough work in chunks that we get decent cache and thread quantum utilization. It's possible we could do better, as the policy here hasn't been profiled much. Thus far we haven't profiled any performance issues that suggested we should be handling this differently.
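As an illustration of the two points above, the count-then-promote scheme can be sketched in managed pseudocode. This is only a sketch of the policy being described, not the runtime's actual implementation (which is native and hooks the PreStub); the type names, the queue, and the threshold value are hypothetical:

```csharp
using System.Collections.Concurrent;
using System.Threading;

// Hypothetical managed sketch of the tiering policy described above.
internal sealed class TieredMethodState
{
    public int CallCount;             // incremented on each Tier0 invocation
    public volatile bool IsOptimized; // set once the optimized body is installed
}

internal static class TieringPolicy
{
    // Hypothetical threshold; a fixed bound keeps the total counter overhead
    // per method small next to the ~100,000 cycle cost of an optimizing jit.
    private const int PromotionThreshold = 30;

    private static readonly ConcurrentQueue<TieredMethodState> s_pendingPromotions =
        new ConcurrentQueue<TieredMethodState>();

    // Conceptually invoked from the method's prolog while it still runs Tier0 code.
    public static void OnInvoke(TieredMethodState method)
    {
        if (!method.IsOptimized &&
            Interlocked.Increment(ref method.CallCount) == PromotionThreshold)
        {
            // Hand off to the single background worker, which drains the queue
            // in bounded time slices rather than monopolizing a threadpool thread.
            s_pendingPromotions.Enqueue(method);
        }
    }
}
```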
diff --git a/docs/design/features/unloadability.md b/docs/design/features/unloadability.md index 4b61b0f8b3ce6..0d1cddd0f0967 100644 --- a/docs/design/features/unloadability.md +++ b/docs/design/features/unloadability.md @@ -1,4 +1,4 @@ -# `AssemblyLoadContext` unloadability +# `AssemblyLoadContext` unloadability ## Goals * Provide a building block for unloadable plug-ins * Users can load an assembly and its dependencies into an unloadable `AssemblyLoadContext`. @@ -100,7 +100,7 @@ Unloading is initialized by the user code calling `AssemblyLoadContext.Unload` m * The `AssemblyLoadContext` fires the `Unloading` event to allow the user code to perform cleanup if required (e.g. stop threads running inside of the context, remove references and destroy handles, etc.) * The `AssemblyLoadContext.InitiateUnload` method is called. It creates a strong GC handle referring to the `AssemblyLoadContext` to keep it around until the unload is complete. For example, finalizers of types that are loaded into the `AssemblyLoadContext` may need access to the `AssemblyLoadContext`. * Then it calls `AssemblyNative::PrepareForAssemblyLoadContextRelease` method with that strong handle as an argument, which in turn calls `CLRPrivBinderAssemblyLoadContext::PrepareForLoadContextRelease` -* That method stores the passed in strong GC handle in `CLRPrivBinderAssemblyLoadContext::m_ptrManagedStrongAssemblyLoadContext`. +* That method stores the passed in strong GC handle in `CLRPrivBinderAssemblyLoadContext::m_ptrManagedStrongAssemblyLoadContext`. * Then it decrements refcount of the `AssemblyLoaderAllocator` the `CLRPrivBinderAssemblyLoadContext` points to. * Finally, it destroys the strong handle to the managed `LoaderAllocator`. That allows the `LoaderAllocator` to be collected. ### Second phase of unloading diff --git a/docs/design/specs/Ecma-335-Augments.md b/docs/design/specs/Ecma-335-Augments.md index 5d3e99f19a8b4..df4e429912a11 100644 --- a/docs/design/specs/Ecma-335-Augments.md +++ b/docs/design/specs/Ecma-335-Augments.md @@ -195,13 +195,13 @@ c) In section II.23.2.6 LocalVarSig, replace the diagram with production rules: ```ebnf LocalVarSig ::= LOCAL_SIG Count LocalVarType+ - + LocalVarType ::= Type CustomMod* Constraint BYREF? Type CustomMod* BYREF Type CustomMod* TYPEDBYREF - + ``` d) In section II.23.2.10 Param, replace the diagram with production rules: @@ -227,7 +227,7 @@ f) In section II.23.2.12 Type, add a production rule to the definition of `Type` ```ebnf Type ::= CustomMod* Type - + ``` g) In sections II.23.2.12 Type and II.23.2.14 TypeSpec replace production rule diff --git a/docs/design/specs/PE-COFF.md b/docs/design/specs/PE-COFF.md index 69074307155d7..dff7248a326c9 100644 --- a/docs/design/specs/PE-COFF.md +++ b/docs/design/specs/PE-COFF.md @@ -19,11 +19,11 @@ PE/COFF Specification defines the structure of Debug Directory in section 5.1.1. | Offset | Size | Field | Description | |:-------|:-----|:---------------|----------------------------------------------------------------| | 0 | 4 | Signature | 0x52 0x53 0x44 0x53 (ASCII string: "RSDS") | -| 4 | 16 | Guid | GUID (Globally Unique Identifier) of the associated PDB. +| 4 | 16 | Guid | GUID (Globally Unique Identifier) of the associated PDB. | 20 | 4 | Age | Iteration of the PDB. The first iteration is 1. The iteration is incremented each time the PDB content is augmented. | 24 | | Path | UTF-8 NUL-terminated path to the associated .pdb file | -Guid and Age are used to match PE/COFF image with the associated PDB. 
+Guid and Age are used to match PE/COFF image with the associated PDB. The associated .pdb file may not exist at the path indicated by Path field. If it doesn't the Path, Guid and Age can be used to find the corresponding PDB file locally or on a symbol server. The exact search algorithm used by tools to locate the PDB depends on the tool and its configuration. @@ -37,11 +37,11 @@ If the containing PE/COFF file is deterministic the Guid field above and DateTim The entry doesn't have any data associated with it. All fields of the entry, but Type shall be zero. -Presence of this entry indicates that the containing PE/COFF file is deterministic. +Presence of this entry indicates that the containing PE/COFF file is deterministic. ### Embedded Portable PDB Debug Directory Entry (type 17) -Declares that debugging information is embedded in the PE file at location specified by PointerToRawData. +Declares that debugging information is embedded in the PE file at location specified by PointerToRawData. *Version Major=any, Minor=0x0100* of the data format: @@ -49,7 +49,7 @@ Declares that debugging information is embedded in the PE file at location speci |:-------|:---------------|:-----------------|-------------------------------------------------------| | 0 | 4 | Signature | 0x4D 0x50 0x44 0x42 | | 4 | 4 | UncompressedSize | The size of decompressed Portable PDB image | -| 8 | SizeOfData - 8 | PortablePdbImage | Portable PDB image compressed using Deflate algorithm | +| 8 | SizeOfData - 8 | PortablePdbImage | Portable PDB image compressed using Deflate algorithm | If both CodeView and Embedded Portable PDB entries are present then they shall represent the same data. @@ -70,9 +70,9 @@ The value of Stamp field in the entry shall be 0. Stores crypto hash of the content of the symbol file the PE/COFF file was built with. -The hash can be used to validate that a given PDB file was built with the PE/COFF file and not altered in any way. +The hash can be used to validate that a given PDB file was built with the PE/COFF file and not altered in any way. -More than one entry can be present, in case multiple PDBs were produced during the build of the PE/COFF file (e.g. private and public symbols). +More than one entry can be present, in case multiple PDBs were produced during the build of the PE/COFF file (e.g. private and public symbols). *Version Major=0x0001, Minor=0x0000* of the entry data format is following: @@ -103,7 +103,7 @@ When validating that Portable PDB matches the debug directory record check that If the symbol format is Windows PDB the checksum is calculated by hashing the entire content of the PDB file with the PDB signature comprising of 16B GUID and 4B timestamp zeroed. -When validating that Windows PDB matches the debug directory record check that the checksums match and that the PDB signature (both GUID and timestamp values) match the data in the corresponding [CodeView record](#WindowsCodeViewEntry). +When validating that Windows PDB matches the debug directory record check that the checksums match and that the PDB signature (both GUID and timestamp values) match the data in the corresponding [CodeView record](#WindowsCodeViewEntry). > Note that when the debugger (or other tool) searches for the PDB only GUID and Age fields are used to match the PDB, but the timestamp of the CodeView debug directory entry does not need to match the timestamp stored in the PDB. Therefore, to verify byte-for-byte identity of the PDB, the timestamp field should also be checked. 
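The records described in this section can be enumerated with the PE reader in `System.Reflection.Metadata`; the following sketch prints the CodeView, deterministic, embedded Portable PDB, and PDB checksum entries of a file (the `App.dll` path is illustrative):

```csharp
using System;
using System.IO;
using System.Reflection.PortableExecutable;

using FileStream stream = File.OpenRead("App.dll");
using var peReader = new PEReader(stream);

foreach (DebugDirectoryEntry entry in peReader.ReadDebugDirectory())
{
    switch (entry.Type)
    {
        case DebugDirectoryEntryType.CodeView:
            // Guid, Age and Path as defined by the RSDS record above.
            CodeViewDebugDirectoryData cv = peReader.ReadCodeViewDebugDirectoryData(entry);
            Console.WriteLine($"CodeView: Guid={cv.Guid}, Age={cv.Age}, Path={cv.Path}");
            break;
        case DebugDirectoryEntryType.Reproducible:
            Console.WriteLine("Deterministic (reproducible) PE file");
            break;
        case DebugDirectoryEntryType.EmbeddedPortablePdb:
            Console.WriteLine("Embedded Portable PDB present");
            break;
        case DebugDirectoryEntryType.PdbChecksum:
            PdbChecksumDebugDirectoryData checksum = peReader.ReadPdbChecksumDebugDirectoryData(entry);
            Console.WriteLine($"PdbChecksum: algorithm={checksum.AlgorithmName}");
            break;
    }
}
```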
diff --git a/docs/design/specs/PortablePdb-Metadata.md b/docs/design/specs/PortablePdb-Metadata.md index d3d9b1277abd1..d642bc0a6523c 100644 --- a/docs/design/specs/PortablePdb-Metadata.md +++ b/docs/design/specs/PortablePdb-Metadata.md @@ -34,7 +34,7 @@ When debugging metadata is generated to a separate data blob "#Pdb" and "#~" str #### #Pdb stream The #Pdb stream has the following structure: - + | Offset | Size | Field | Description | |:-------|:-----|:---------------|----------------------------------------------------------------| | 0 | 20 | PDB id | A byte sequence uniquely representing the debugging metadata blob content. | @@ -42,10 +42,10 @@ The #Pdb stream has the following structure: | 24 | 8 | ReferencedTypeSystemTables | Bit vector of referenced type system metadata tables, let n be the number of bits that are 1. | | 32 | 4*n | TypeSystemTableRows | Array of n 4-byte unsigned integers indicating the number of rows for each referenced type system metadata table. | -#### #~ stream +#### #~ stream "#~" stream shall only contain debugging information tables defined above. - + References to heaps (strings, blobs, guids) are references to heaps of the debugging metadata. The sizes of references to type system tables are determined using the algorithm described in ECMA-335-II Chapter 24.2.6, except their respective row counts are found in _TypeSystemTableRows_ field of the #Pdb stream. ### Document Table: 0x30 @@ -58,7 +58,7 @@ The Document table has the following columns: The table is not required to be sorted. -There shall be no duplicate rows in the _Document_ table, based upon document name. +There shall be no duplicate rows in the _Document_ table, based upon document name. _Name_ shall not be nil. It can however encode an empty name string. @@ -170,7 +170,7 @@ _InitialDocument_ is only present if the _Document_ field of the _MethodDebugInf | _δILOffset_ | 0 | unsigned compressed | | _Document_ | Document row id | unsigned compressed | -Each _SequencePointRecord_ represents a single sequence point. The sequence point inherits the value of _Document_ property from the previous record (_SequencePointRecord_ or _document-record_), from the _Document_ field of the _MethodDebugInformation_ table if it's the first sequence point of a method body that spans a single document, or from _InitialDocument_ if it's the first sequence point of a method body that spans multiple documents. The value of _IL Offset_ is calculated using the value of the previous sequence point (if any) and the value stored in the record. +Each _SequencePointRecord_ represents a single sequence point. The sequence point inherits the value of _Document_ property from the previous record (_SequencePointRecord_ or _document-record_), from the _Document_ field of the _MethodDebugInformation_ table if it's the first sequence point of a method body that spans a single document, or from _InitialDocument_ if it's the first sequence point of a method body that spans multiple documents. The value of _IL Offset_ is calculated using the value of the previous sequence point (if any) and the value stored in the record. The values of _Start Line_, _Start Column_, _End Line_ and _End Column_ of a non-hidden sequence point are calculated based upon the values of the previous non-hidden sequence point (if any) and the data stored in the record. 
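Consumers rarely need to decode these deltas by hand: the `System.Reflection.Metadata` reader applies the rules above (together with the line and column rules described next) while enumerating sequence points. A minimal sketch, with an illustrative `App.pdb` path:

```csharp
using System;
using System.IO;
using System.Reflection.Metadata;

using var provider = MetadataReaderProvider.FromPortablePdbStream(File.OpenRead("App.pdb"));
MetadataReader reader = provider.GetMetadataReader();

foreach (MethodDebugInformationHandle handle in reader.MethodDebugInformation)
{
    MethodDebugInformation info = reader.GetMethodDebugInformation(handle);
    if (info.SequencePointsBlob.IsNil)
        continue; // the method has no sequence points

    foreach (SequencePoint point in info.GetSequencePoints())
    {
        if (!point.IsHidden)
            Console.WriteLine(
                $"IL_{point.Offset:X4}: ({point.StartLine},{point.StartColumn})-({point.EndLine},{point.EndColumn})");
    }
}
```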
@@ -253,14 +253,14 @@ There shall be no duplicate rows in the LocalConstant table, based upon owner an The structure of the blob is Blob ::= CustomMod* (PrimitiveConstant | EnumConstant | GeneralConstant) - - PrimitiveConstant ::= PrimitiveTypeCode PrimitiveValue + + PrimitiveConstant ::= PrimitiveTypeCode PrimitiveValue PrimitiveTypeCode ::= BOOLEAN | CHAR | I1 | U1 | I2 | U2 | I4 | U4 | I8 | U8 | R4 | R8 | STRING - - EnumConstant ::= EnumTypeCode EnumValue EnumType + + EnumConstant ::= EnumTypeCode EnumValue EnumType EnumTypeCode ::= BOOLEAN | CHAR | I1 | U1 | I2 | U2 | I4 | U4 | I8 | U8 EnumType ::= TypeDefOrRefOrSpecEncoded - + GeneralConstant ::= (CLASS | VALUETYPE) TypeDefOrRefOrSpecEncoded GeneralValue? | OBJECT @@ -289,18 +289,18 @@ The encoding of the _PrimitiveValue_ and _EnumValue_ is determined based upon th | ```U8``` | uint64 | | ```R4``` | float32 | | ```R8``` | float64 | -| ```STRING``` | A single byte 0xff (represents a null string reference), or a UTF-16 little-endian encoded string (possibly empty). | +| ```STRING``` | A single byte 0xff (represents a null string reference), or a UTF-16 little-endian encoded string (possibly empty). | The numeric values of the type codes are defined by ECMA-335 §II.23.1.16. _EnumType_ must be an enum type as defined in ECMA-335 §II.14.3. The value of _EnumTypeCode_ must match the underlying type of the _EnumType_. -The encoding of the _GeneralValue_ is determined based upon the type expressed by _TypeDefOrRefOrSpecEncoded_ specified in _GeneralConstant_. _GeneralValue_ for special types listed in the table below has to be present and is encoded as specified. If the _GeneralValue_ is not present the value of the constant is the default value of the type. If the type is a reference type the value is a null reference, if the type is a pointer type the value is a null pointer, etc. +The encoding of the _GeneralValue_ is determined based upon the type expressed by _TypeDefOrRefOrSpecEncoded_ specified in _GeneralConstant_. _GeneralValue_ for special types listed in the table below has to be present and is encoded as specified. If the _GeneralValue_ is not present the value of the constant is the default value of the type. If the type is a reference type the value is a null reference, if the type is a pointer type the value is a null pointer, etc. | Namespace | Name | _GeneralValue_ encoding | |:--------------|:---------|:-------------------------| | System | Decimal | sign (highest bit), scale (bits 0..7), low (uint32), mid (uint32), high (uint32) | -| System | DateTime | int64: ticks | +| System | DateTime | int64: ticks | ### ImportScope Table: 0x35 The ImportScope table has the following columns: diff --git a/docs/issues-pr-management.md b/docs/issues-pr-management.md index 05090b2332e5f..503de72b452d3 100644 --- a/docs/issues-pr-management.md +++ b/docs/issues-pr-management.md @@ -12,7 +12,7 @@ dotnet/runtime issues and pull requests are a shared resource. As such, it will Here are a few of the most salient components of working well together, and the FAQ has much more detail. ## Scenarios where we all have to work together: -- All incoming issues and pull requests will be automatically labeled with an `area-*` label. The bot will also assign the `untriaged` label to only issues, once they get created. +- All incoming issues and pull requests will be automatically labeled with an `area-*` label. The bot will also assign the `untriaged` label to only issues, once they get created. 
- All issues and pull requests should have exactly 1 `area-*` label. - Issues are considered triaged when the `untriaged` label has been removed. - When issues have `area-*` labels switched, the `untriaged` label must be added. This prevents issues being lost in a `triaged` state when they have not actually been triaged by the area owner. In the future, a bot may automatically ensure this happens. diff --git a/docs/project/dogfooding.md b/docs/project/dogfooding.md index 6b564f7ec239c..b584ada81b85d 100644 --- a/docs/project/dogfooding.md +++ b/docs/project/dogfooding.md @@ -73,7 +73,7 @@ To install additional .NET Core runtimes or SDKs: ... - + ``` (Documentation for configuring feeds is [here](https://docs.microsoft.com/en-us/nuget/consume-packages/configuring-nuget-behavior).) diff --git a/docs/project/linux-performance-tracing.md b/docs/project/linux-performance-tracing.md index 2899d7e683a26..bb39bf904629a 100644 --- a/docs/project/linux-performance-tracing.md +++ b/docs/project/linux-performance-tracing.md @@ -1,4 +1,4 @@ -Performance Tracing on Linux +Performance Tracing on Linux ============================ When a performance problem is encountered on Linux, these instructions can be used to gather detailed information about what was happening on the machine at the time of the performance problem. diff --git a/docs/project/strong-name-signing.md b/docs/project/strong-name-signing.md index c082df3743056..269bb102c4ad2 100644 --- a/docs/project/strong-name-signing.md +++ b/docs/project/strong-name-signing.md @@ -10,7 +10,7 @@ All .NET Core assemblies are [strong-named](https://docs.microsoft.com/en-us/dot ## FAQ ### 1. Microsoft strong-names their assemblies, should I? -For the most part, the majority of applications do not need strong-names. Strong-names are left over from previous eras of .NET where [sandboxing](https://en.wikipedia.org/wiki/Sandbox_(computer_security)) needed to differentiate between code that was trusted, versus code that was untrusted. However in recent years, sandboxing via AppDomains, especially to [isolate ASP.NET web applications](https://support.microsoft.com/en-us/help/2698981/asp-net-partial-trust-does-not-guarantee-application-isolation), is no longer guaranteed and is not recommended. +For the most part, the majority of applications do not need strong-names. Strong-names are left over from previous eras of .NET where [sandboxing](https://en.wikipedia.org/wiki/Sandbox_(computer_security)) needed to differentiate between code that was trusted, versus code that was untrusted. However in recent years, sandboxing via AppDomains, especially to [isolate ASP.NET web applications](https://support.microsoft.com/en-us/help/2698981/asp-net-partial-trust-does-not-guarantee-application-isolation), is no longer guaranteed and is not recommended. However, strong-names are still required in applications in some rare situations, most of which are called out on this page: [Strong-Named Assemblies](https://docs.microsoft.com/en-us/dotnet/framework/app-domains/strong-named-assemblies). @@ -21,6 +21,6 @@ There are three major problems that developers run into after strong naming thei 1. _Binding Policy_. When developers talk about strong-names, they are usually conflating it with the strict binding policy of the .NET Framework that kicks in _when_ you strong-name. 
This binding policy is problematic because it forces, by default, an exact match between reference and version, and requires developers to author complex [binding redirects](https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/bindingredirect-element) when they don't. In recent versions of Visual Studio, however, we've added [Automatic Binding Redirection](https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/how-to-enable-and-disable-automatic-binding-redirection) as an attempt to reduce pain of this policy on developers. On top of this, all newer platforms, including _Silverlight_, _WinRT-based platforms_ (Phone and Store), _.NET Native_ and _ASP.NET 5_ this policy has been loosened, allowing later versions of an assembly to satisfy earlier references, thereby completely removing the need to ever write binding redirects on those platforms. -2. _Virality_. Once you've strong-named an assembly, you can only statically reference other strong-named assemblies. +2. _Virality_. Once you've strong-named an assembly, you can only statically reference other strong-named assemblies. 3. _No drop-in replacement_. This is a problem for open source libraries where the strong-name private key is not checked into the repository. This means that developers are unable to build to their own version of the library and then use it as a drop-in replacement without recompiling _all_ consuming libraries up stack to pick up the new identity. This is extremely problematic for libraries, such as Json.NET, which have large incoming dependencies. Firstly, we would recommend that these open source projects check-in their private key (remember, [strong-names are used for identity, and not for security](https://docs.microsoft.com/en-us/dotnet/framework/app-domains/strong-named-assemblies)). Failing that, however, we've introduced a new concept called [Public Signing](public-signing.md) that enables developers to build drop-in replacements without needing access to the strong-name private key. This is the mechanism that .NET Core libraries use by default. diff --git a/docs/project/versioning.md b/docs/project/versioning.md index ab5b027397e27..728195b10845c 100644 --- a/docs/project/versioning.md +++ b/docs/project/versioning.md @@ -57,4 +57,4 @@ The version we produce by our calculations is mainly used in two places: - As the [Assembly File Version](https://msdn.microsoft.com/en-us/library/51ket42z(v=vs.110).aspx) - As the packages version number -To get more information on where are we doing the calculations for the versioning, you can [click here](https://github.com/dotnet/buildtools/blob/master/src/Microsoft.DotNet.Build.Tasks/PackageFiles/versioning.targets) to find the targets file where we create the versioning assets, and [here](https://github.com/dotnet/buildtools/blob/master/src/Microsoft.DotNet.Build.Tasks/GenerateCurrentVersion.cs) to see the code on where we calculate BuildNumberMajor and BuildNumberMinor. \ No newline at end of file +To get more information on where are we doing the calculations for the versioning, you can [click here](https://github.com/dotnet/buildtools/blob/master/src/Microsoft.DotNet.Build.Tasks/PackageFiles/versioning.targets) to find the targets file where we create the versioning assets, and [here](https://github.com/dotnet/buildtools/blob/master/src/Microsoft.DotNet.Build.Tasks/GenerateCurrentVersion.cs) to see the code on where we calculate BuildNumberMajor and BuildNumberMinor. 
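As a quick illustration of where these computed versions surface, they can be read back from any built assembly with standard reflection. A sketch: `typeof(object).Assembly` is just an arbitrary example target, and the informational version often (but not always) carries the package-style version string:

```csharp
using System;
using System.Reflection;

Assembly assembly = typeof(object).Assembly;

var fileVersion = assembly.GetCustomAttribute<AssemblyFileVersionAttribute>()?.Version;
var informationalVersion = assembly.GetCustomAttribute<AssemblyInformationalVersionAttribute>()?.InformationalVersion;

Console.WriteLine($"Assembly file version: {fileVersion}");
Console.WriteLine($"Informational (package-style) version: {informationalVersion}");
```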
diff --git a/docs/project/windows-performance-tracing.md b/docs/project/windows-performance-tracing.md index 04fab29feca9f..d23fc61f092a5 100644 --- a/docs/project/windows-performance-tracing.md +++ b/docs/project/windows-performance-tracing.md @@ -11,4 +11,4 @@ PerfView has significant documentation built-in, which includes: To get started, download PerfView and use the links on the main screen to get help. -If you have specific questions, please post them in an issue here. \ No newline at end of file +If you have specific questions, please post them in an issue here. diff --git a/docs/project/writing-tests.md b/docs/project/writing-tests.md index f2984f020b7b0..dd66fc96cd2b2 100644 --- a/docs/project/writing-tests.md +++ b/docs/project/writing-tests.md @@ -56,7 +56,7 @@ public async Task Headers_SetAfterRequestSubmitted_ThrowsInvalidOperationExcepti ``` # OuterLoop -This one is fairly simple but often used incorrectly. When running tests which depend on outside influences like e.g. Hardware (Internet, SerialPort, ...) and you can't mitigate these dependencies, you might consider using the `[OuterLoop]` attribute for your test. +This one is fairly simple but often used incorrectly. When running tests which depend on outside influences like e.g. Hardware (Internet, SerialPort, ...) and you can't mitigate these dependencies, you might consider using the `[OuterLoop]` attribute for your test. With this attribute, tests are executed in a dedicated CI loop and won't break the default CI loops which get created when you submit a PR. To run OuterLoop tests locally you need to set the msbuild property "OuterLoop" to true: `/p:OuterLoop=true`. To run OuterLoop tests in CI you need to mention dotnet-bot and identify the tests you want to run. See `@dotnet-bot help` for the exact loop names. diff --git a/docs/workflow/README.md b/docs/workflow/README.md index 7a17fecd50eff..cc08240c14aa1 100644 --- a/docs/workflow/README.md +++ b/docs/workflow/README.md @@ -42,7 +42,7 @@ To build just one part you use the root build script (build.cmd/sh), and you add ## Configurations -You may need to build the tree in a combination of configurations. This section explains why. +You may need to build the tree in a combination of configurations. This section explains why. A quick reminder of some concepts -- see the [glossary](../project/glossary.md) for more on these: @@ -51,7 +51,7 @@ A quick reminder of some concepts -- see the [glossary](../project/glossary.md) * **Release Configuration** -- Optimized code. Asserts are disabled. Runs at the best speed, and suitable for performance profiling. You will have limited debugging experience. When we talk about mixing configurations, we're discussing the following sub-components: - + * **Runtime** is the execution engine for managed code and there are two different implementations available. Both are written in C/C++, therefore, easier to debug when built in a Debug configuration. * CoreCLR is the comprehensive execution engine which if build in Debug Configuration it executes managed code very slowly. For example, it will take a long time to run the managed code unit tests. The code lives under [src/coreclr](../../src/coreclr). * Mono is portable and also slimmer runtime and it's not that sensitive to Debug Configuration for running managed code. You will still need to build it without optimizations to have good runtime debugging experience though. The code lives under [src/mono](../../src/mono). 
diff --git a/docs/workflow/building/libraries/README.md b/docs/workflow/building/libraries/README.md index 84daa4a51462c..de6969f286582 100644 --- a/docs/workflow/building/libraries/README.md +++ b/docs/workflow/building/libraries/README.md @@ -185,7 +185,7 @@ You can iterate on `System.Private.CoreLib` by running: build.cmd clr.corelib+clr.nativecorelib+libs.pretest -rc Release ``` -When this `System.Private.CoreLib` will be built in Release mode, then it will be crossgen'd and we will update the testhost to the latest version of corelib. +When this `System.Private.CoreLib` will be built in Release mode, then it will be crossgen'd and we will update the testhost to the latest version of corelib. You can use the same workflow for mono runtime by using `mono.corelib+libs.pretest` subsets. diff --git a/docs/workflow/building/libraries/freebsd-instructions.md b/docs/workflow/building/libraries/freebsd-instructions.md index 09008a4f49c49..d62686b60839b 100644 --- a/docs/workflow/building/libraries/freebsd-instructions.md +++ b/docs/workflow/building/libraries/freebsd-instructions.md @@ -20,18 +20,18 @@ This is certainly undesirable and it should be avoided if possible. ``` mkdir ~/dotnet cd ~/dotnet -curl https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-freebsd-x64.tar.gz | tar xfz - +curl https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-freebsd-x64.tar.gz | tar xfz - ``` -if on 12.x you may also need to set `LD_PRELOAD` to `/usr/lib/libpthread.so` to avoid issue when cli freezes. +if on 12.x you may also need to set `LD_PRELOAD` to `/usr/lib/libpthread.so` to avoid issue when cli freezes. As of summer 2019 this CLI is no longer good enough to build all repos. If that is your case jump to section [Updating CLI](#updating--bootstrap-cli) Binary snapshot can be obtained from https://github.com/wfurt/blob as dotnet-sdk-freebsd-x64-latest.tgz ## Getting sources -master of source-build pulls in source code of specific snapshot instead of tip of master branches. -That is generally OK but in case of FreeBSD it may miss some changes crucial for build. -(or pending un-submitted change) +master of source-build pulls in source code of specific snapshot instead of tip of master branches. +That is generally OK but in case of FreeBSD it may miss some changes crucial for build. +(or pending un-submitted change) ``` git clone https://github.com/dotnet/source-build @@ -44,9 +44,9 @@ git submodule update (cd src/coreclr ; git checkout master) ``` -port change from +port change from ```https://github.com/dotnet/corefx/commit/037859ac403ef17879655bb2f2e821d52e6eb4f3``` -In ideal case we could sync up to **master** but that brings Arcade changes and **breaks** the build. +In ideal case we could sync up to **master** but that brings Arcade changes and **breaks** the build. Bootstrap Arcade ``` @@ -86,8 +86,8 @@ index 81b8c7b..bb26868 100644 $(BuildArguments) -PortableBuild=$(PortableBuild) ``` -Depending of the day and moon phase you may need to get some updates as well. -If build breaks look for pending PRs with FreeBSD tag or label and pull pending changes. +Depending of the day and moon phase you may need to get some updates as well. +If build breaks look for pending PRs with FreeBSD tag or label and pull pending changes. ## Building @@ -106,7 +106,7 @@ export DOTNET_CLI_TELEMETRY_OPTOUT=1 ``` In ideal situation this will build whole sdk. Right now it fails somewhere in cli. 
-There is problem with rebuild and build will attempt to patch files again and/or make git updates. 
+There is a problem with rebuild: the build will attempt to patch files again and/or make git updates.
 
 ```export SOURCE_BUILD_SKIP_SUBMODULE_CHECK=1```
 
@@ -114,23 +114,23 @@ To build single repo again one can do:
 ```./build.sh /p:RootRepo=corefx /p:SkipRepoReferences=true ```
 
 ## Resolving issues
-Rebuild or source-build has issues. 
-Often running ```clean.sh``` from top helps. Be careful, that may undo any local pending changes. 
+Rebuilds of source-build sometimes have issues.
+Often running ```clean.sh``` from the top helps. Be careful: that may undo any local pending changes.
 
 Sometimes it would try to apply patches and it would fail.
-You can pass 
-```/p:SkipPatches=true``` to top level build.sh script. 
+You can pass
+```/p:SkipPatches=true``` to the top-level build.sh script.
 
 ## Running CoreFX tests
 
-Follow steps above to build at least corefx and it's dependencies. 
+Follow the steps above to build at least corefx and its dependencies.
 
 TBD
 
 ## Updating bootstrap CLI.
 
-As build changes, previous versions of CLI may not be good enough any more. Changes in runtime or build dependency on 3.0 JSON are some example of braking changes. Following steps outline steps to update published CLI to what build needs. It will require other system where builds is supported. As close similarity and availability Linux will be used in examples bellow but Windows or MacOS should also yield same result. 
+As the build changes, previous versions of the CLI may not be good enough any more. Changes in the runtime or the build dependency on 3.0 JSON are some examples of breaking changes. The following steps outline how to update the published CLI to what the build needs. It will require another system where builds are supported. Because of its close similarity and availability, Linux will be used in the examples below, but Windows or macOS should also yield the same result.
 
 Often the build would ask for a slightly different version without actually having a real dependency on it (that is part of rolling updates across repos). One can cheat in this case and simply:
 
@@ -138,12 +138,12 @@ One can cheat in this case and simply:
 ln -s ~/dotnet/sdk/old_version ~/dotnet/sdk/new_version
 ```
 
- 
+
 ### Finding versions and commit hashes
 
-First we need to find what version are are trying to recreate. That is 'sdk' section in global.json in each repo. As of preview9ih time, this is set to 3.0.100-preview6-012264 and such version will be used in examples. One advantage of using release branches is that it is in coherent state e.g. all repos should need exactly same version. 
+First we need to find what version we are trying to recreate. That is the 'sdk' section of global.json in each repo. Around the preview9 timeframe, this is set to 3.0.100-preview6-012264 and that version will be used in the examples. One advantage of using release branches is that they are in a coherent state, e.g. all repos should need exactly the same version.
 
-Let's get SDK for supported OS. Sync code base to same version you are trying to build on FreeBSD. 
+Let's get the SDK for a supported OS. Sync the code base to the same version you are trying to build on FreeBSD.
 ```
 ./eng/common/build.sh --restore
 Downloading 'https://dot.net/v1/dotnet-install.sh'
@@ -186,7 +186,7 @@ cd core-sdk
 git checkout be3f0c1a03f80492d45396c9f5b855b10a8a0b79
 ```
 
-Set variables and assemble SKD without crossgen. (set DropSuffix=true to strip `preview6` from version). 
+Set variables and assemble the SDK without crossgen. (Set DropSuffix=true to strip `preview6` from the version.)
``` export DISABLE_CROSSGEN=true export CLIBUILD_SKIP_TESTS=true @@ -212,7 +212,7 @@ cd coreclr git checkout 7ec87b0097fdd4400a8632a2eae56612914579ef ``` -and build +and build ``` mkdir -p .dotnet curl https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-freebsd-x64.tar.gz | tar xfz - -C .dotnet @@ -247,7 +247,7 @@ git checkout d47cae744ddfb625db8e391cecb261e4c3d7bb1c ``` #### Building core-setup -As this has very little platform dependency it is unlikely this needs to be touched. +As this has very little platform dependency it is unlikely this needs to be touched. If we want to do this to pick up fix or for consistency than ... TBD ``` diff --git a/docs/workflow/building/libraries/webassembly-instructions.md b/docs/workflow/building/libraries/webassembly-instructions.md index 408aa4e908270..973047b2d54cc 100644 --- a/docs/workflow/building/libraries/webassembly-instructions.md +++ b/docs/workflow/building/libraries/webassembly-instructions.md @@ -14,7 +14,7 @@ export EMSDK_PATH=PATH_TO_SDK_INSTALL/emsdk ## Building everything -At this time no other build configurations are necessary to start building for WebAssembly. The CoreLib for WebAssembly build configurations will be built by default using the WebAssembly configuration shown below. +At this time no other build configurations are necessary to start building for WebAssembly. The CoreLib for WebAssembly build configurations will be built by default using the WebAssembly configuration shown below. This document explains how to work on libraries. In order to work on library projects or run library tests it is necessary to have built the runtime to give the libraries something to run on. If you haven't already done so, please read [this document](../../README.md#Configurations) to understand configurations. @@ -90,7 +90,7 @@ The WebAssembly implementation files are built and made available in the artifac For Linux and MacOSX: ```bash -./dotnet.sh build /p:Configuration=Debug|Release /p:TargetArchitecture=wasm /p:TargetOS=Browser src/libraries/src.proj /t:BuildWasmRuntimes +./dotnet.sh build /p:Configuration=Debug|Release /p:TargetArchitecture=wasm /p:TargetOS=Browser src/libraries/src.proj /t:BuildWasmRuntimes ``` __Note__: A `Debug` build sets the following environment variables by default. When built from the command line this way the `Configuration` value is case sensitive. @@ -104,7 +104,7 @@ __Note__: A `Debug` build sets the following environment variables by default. #### Example: ``` -L: GC_MAJOR_SWEEP: major size: 752K in use: 39K +L: GC_MAJOR_SWEEP: major size: 752K in use: 39K L: GC_MAJOR: (user request) time 3.00ms, stw 3.00ms los size: 0K in use: 0K ``` @@ -125,7 +125,7 @@ First update emscripten version in the [webassembly Dockerfile](https://github.c ENV EMSCRIPTEN_VERSION=1.39.16 ``` -Submit a PR request with the updated version, wait for all checks to pass and for the request to be merged. A [master.json file](https://github.com/dotnet/versions/blob/master/build-info/docker/image-info.dotnet-dotnet-buildtools-prereqs-docker-master.json#L1126) will be updated with the a new docker image. +Submit a PR request with the updated version, wait for all checks to pass and for the request to be merged. A [master.json file](https://github.com/dotnet/versions/blob/master/build-info/docker/image-info.dotnet-dotnet-buildtools-prereqs-docker-master.json#L1126) will be updated with the a new docker image. ``` { @@ -154,4 +154,4 @@ container: registry: mcr ``` -Open a PR request with the new image. 
+Open a PR request with the new image. diff --git a/docs/workflow/building/mono/README.md b/docs/workflow/building/mono/README.md index 276ae512cedab..caa80181c8e88 100644 --- a/docs/workflow/building/mono/README.md +++ b/docs/workflow/building/mono/README.md @@ -13,14 +13,14 @@ Before proceeding further, please click on the link above that matches your mach To build the Mono runtime, you must first do a complete runtime build (coreclr, libraries, and then mono). At the repo root, simply execute: ```bash -./build.sh +./build.sh ``` or on Windows, ```bat build.cmd ``` Note that the debug configuration is the default option. It generates a 'debug' output and that includes asserts, fewer code optimizations, and is easier for debugging. If you want to make performance measurements, or just want tests to execute more quickly, you can also build the 'release' version which does not have these checks by adding the flag `-configuration release` (or `-c release`). - + Once you've built the whole runtime and assuming you want to work with just mono, you want to use the following command: @@ -40,7 +40,7 @@ Here are a list of build arguments that may be of use: `/p:MonoEnableLlvm=true /p:MonoLLVMDir=path/to/llvm` - Builds mono w/ LLVM from a custom path -`/p:MonoEnableLlvm=true /p:MonoLLVMDir=path/to/llvm /p:MonoLLVMUseCxx11Abi=true` - Builds mono w/ LLVM +`/p:MonoEnableLlvm=true /p:MonoLLVMDir=path/to/llvm /p:MonoLLVMUseCxx11Abi=true` - Builds mono w/ LLVM from a custom path (and that LLVM was built with C++11 ABI) For `build.sh` @@ -68,7 +68,7 @@ The following packages will be created under `artifacts\packages\ - `transport.Microsoft.NETCore.Runtime.Mono.-dev..1.nupkg` - `transport.runtime..Microsoft.NETCore.Runtime.Mono.-dev..1.nupkg` -## Important Notes +## Important Notes Test binaries are not yet available for mono. diff --git a/docs/workflow/ci/coreclr-ci-health.md b/docs/workflow/ci/coreclr-ci-health.md index 021ad7d7fc7ff..8aa9c5b0e7e99 100644 --- a/docs/workflow/ci/coreclr-ci-health.md +++ b/docs/workflow/ci/coreclr-ci-health.md @@ -15,7 +15,7 @@ https://github.com/dotnet/runtime/issues/702 was opened as a way to simply view #### Terminology -In order to follow some of the terminology used, there is an expected familiarity of Azure DevOps required. For an in depth guide with Azure DevOps pipeline definitions, please see: https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=azure-devops&tabs=schema. +In order to follow some of the terminology used, there is an expected familiarity of Azure DevOps required. For an in depth guide with Azure DevOps pipeline definitions, please see: https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=azure-devops&tabs=schema. The most common terminology and most important are the different containers work happens in. @@ -25,7 +25,7 @@ The most common terminology and most important are the different containers work `Job`: Jobs are the smallest unit of work which happen on a unique machine. Jobs by default run in parallel, but may be set to depend on another job. **Every job executes its work on a unique machine**. -`Steps`: Steps are the smallest unit of work, they generally correspond to one command that will happen in a job. Normally a job contains steps, which execute serially. +`Steps`: Steps are the smallest unit of work, they generally correspond to one command that will happen in a job. Normally a job contains steps, which execute serially. 
## CI Overview @@ -157,7 +157,7 @@ This tracks the overall end to end run time of a pipeline. This graph is useful Specifically the query is useful for finding out whether a specific Helix Queue (a group of machines) is overloaded or not. This is useful for diagnosing arm hardware issues, because we have a fixed amount that is easily overloaded. ``` -WorkItems +WorkItems | where QueueName == "ubuntu.1804.armarch.open" | extend DaysAgo = datetime_diff('Day', now(), Queued) | extend QueueTimeInSeconds = datetime_diff('Second', Started, Queued) diff --git a/docs/workflow/debugging/libraries/debugging-packages.md b/docs/workflow/debugging/libraries/debugging-packages.md index 5b25a1ffb14fe..f72887149b7f4 100644 --- a/docs/workflow/debugging/libraries/debugging-packages.md +++ b/docs/workflow/debugging/libraries/debugging-packages.md @@ -14,13 +14,13 @@ Debugging CoreFX build issues (This documentation is work in progress.) -I found the following process to help when investigating some of the build issues caused by incorrect packaging. +I found the following process to help when investigating some of the build issues caused by incorrect packaging. -To quickly validate if a given project compiles on all supported configurations use `dotnet build /t:RebuildAll`. This applies for running tests as well. For more information, see [Building individual libraries](../../building/libraries/README.md#building-individual-libraries) +To quickly validate if a given project compiles on all supported configurations use `dotnet build /t:RebuildAll`. This applies for running tests as well. For more information, see [Building individual libraries](../../building/libraries/README.md#building-individual-libraries) Assuming the current directory is `\src\contractname\`: -1. Build the `\ref` folder: `dotnet build` +1. Build the `\ref` folder: `dotnet build` Check the logs for output such as: @@ -58,7 +58,7 @@ Use the same technique above to ensure that the binaries include the correct imp Ensure that all Build Pivots are actually being built. This should build all .\ref and .\src variations as well as actually creating the NuGet packages. -Verify that the contents of the nuspec as well as the actual package is correct. You can find the packages by searching for the following pattern in the msbuild output: +Verify that the contents of the nuspec as well as the actual package is correct. You can find the packages by searching for the following pattern in the msbuild output: ``` GetPkgProjPackageDependencies: diff --git a/docs/workflow/debugging/libraries/windows-instructions.md b/docs/workflow/debugging/libraries/windows-instructions.md index ec075d8b10bda..46d916e74da89 100644 --- a/docs/workflow/debugging/libraries/windows-instructions.md +++ b/docs/workflow/debugging/libraries/windows-instructions.md @@ -25,16 +25,16 @@ As Administrator: windbg -I ``` -You may need to do this for both x64 and x86 versions. +You may need to do this for both x64 and x86 versions. Any application that crashes should now automatically start a WinDBG session. ## Debugging tests To run a single test from command line: -* Locate the test binary folder based on the CSPROJ name. +* Locate the test binary folder based on the CSPROJ name. For example: `src\System.Net.Sockets\tests\Functional\System.Net.Sockets.Tests.csproj` will build and output binaries at `bin\tests\Windows_NT.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0`. 
- + * Execute the test Assuming that your repo is at `C:\corefx`: @@ -44,7 +44,7 @@ cd C:\corefx\bin\tests\Windows_NT.AnyCPU.Debug\System.Net.Sockets.Tests\netcorea C:\corefx\bin\tests\Windows_NT.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0\CoreRun.exe xunit.console.dll System.Net.Sockets.Tests.dll -xml testResults.xml -notrait category=nonwindowstests -notrait category=OuterLoop -notrait category=failing ``` -* If the test crashes or encounters a `Debugger.Launch()` method call, WinDBG will automatically start and attach to the `CoreRun.exe` process +* If the test crashes or encounters a `Debugger.Launch()` method call, WinDBG will automatically start and attach to the `CoreRun.exe` process The following commands will properly configure the debugging extension and fix symbol and source-code references: @@ -129,7 +129,7 @@ Logs are going to be placed in %SYSTEMDRIVE%\sockets.etl. 1. Install [PerfView](https://github.com/Microsoft/perfview/blob/master/documentation/Downloading.md) 2. Run PerfView as Administrator -3. Press Alt+C to collect events +3. Press Alt+C to collect events 4. Disable all other collection parameters 5. Add Additional Providers (see below - Important: keep the "*" wildcard before the names.) @@ -137,7 +137,7 @@ Logs are going to be placed in %SYSTEMDRIVE%\sockets.etl. ### Built-in EventSource tracing -The following EventSources are built-in to CoreFX. The ones that are not marked as [__TestCode__] can be enabled in production scenarios for log collection. +The following EventSources are built-in to CoreFX. The ones that are not marked as [__TestCode__] can be enabled in production scenarios for log collection. #### Global * `*System.Diagnostics.Eventing.FrameworkEventSource {8E9F5090-2D75-4d03-8A81-E5AFBF85DAF1}`: Global EventSource used by multiple namespaces. @@ -169,5 +169,5 @@ Helper scripts are available at https://github.com/dotnet/runtime/tree/master/sr * `*System.Threading.Tasks.Parallel.EventSource`: Provides an event source for tracing TPL information. * `*System.Threading.Tasks.Dataflow.DataflowEventSource {16F53577-E41D-43D4-B47E-C17025BF4025}`: Provides an event source for tracing Dataflow information. -## Notes +## Notes * You can find the test invocation command-line by looking at the logs generated after the `dotnet build /t:test` within the test folder. diff --git a/docs/workflow/testing/coreclr/running-aspnet-benchmarks-with-crossgen2.md b/docs/workflow/testing/coreclr/running-aspnet-benchmarks-with-crossgen2.md index 55ffc5f7459aa..4d5c20b50f089 100644 --- a/docs/workflow/testing/coreclr/running-aspnet-benchmarks-with-crossgen2.md +++ b/docs/workflow/testing/coreclr/running-aspnet-benchmarks-with-crossgen2.md @@ -84,10 +84,10 @@ profiles: cores: 12 jobs: application: - endpoints: + endpoints: - http://asp-perf-win:5001 load: - endpoints: + endpoints: - http://asp-perf-load:5001 aspnet-physical-lin: @@ -96,18 +96,18 @@ profiles: cores: 12 jobs: application: - endpoints: + endpoints: - http://asp-perf-lin:5001 load: - endpoints: + endpoints: - http://asp-perf-load:5001 ``` Now, what does this configuration mean and how is it applied? Let's go over the most important fields to understand its main functionality. -* **Imports**: These are external tools hosted in the Benchmarks repo. -In this case, we only need `wrk`, which is a tool that loads and tests +* **Imports**: These are external tools hosted in the Benchmarks repo. +In this case, we only need `wrk`, which is a tool that loads and tests performance in Web applications. 
* **Jobs**: Here go the job descriptions. A job in this context is the set of
diff --git a/docs/workflow/testing/libraries/testing-android.md b/docs/workflow/testing/libraries/testing-android.md
index 7ac22f84062fa..5ad1b27f79004 100644
--- a/docs/workflow/testing/libraries/testing-android.md
+++ b/docs/workflow/testing/libraries/testing-android.md
@@ -88,4 +88,4 @@ Or simply open `logcat` window in Android Studio or Visual Studio.
 ### Existing Limitations
 - `-os Android` is not supported for Windows yet (`WSL` can be used instead)
 - XHarness.CLI is not able to boot emulators yet (so you need to boot via `AVD Manager` or IDE)
-- AOT and Interpreter modes are not supported yet
\ No newline at end of file
+- AOT and Interpreter modes are not supported yet
diff --git a/docs/workflow/testing/libraries/testing-wasm.md b/docs/workflow/testing/libraries/testing-wasm.md
index 65de9d32ec9e9..8c08a729074f9 100644
--- a/docs/workflow/testing/libraries/testing-wasm.md
+++ b/docs/workflow/testing/libraries/testing-wasm.md
@@ -7,14 +7,14 @@ In order to be able to run tests, the following JavaScript engines should be ins
 They can be installed as a part of [jsvu](https://github.com/GoogleChromeLabs/jsvu).
 
-Please make sure that a JavaScript engine binary is available via command line, 
+Please make sure that a JavaScript engine binary is available via command line,
 e.g. for V8:
 
 ```bash
 $ v8
 V8 version 8.5.62
 ```
-If you use `jsvu`, first add its location to the PATH variable 
+If you use `jsvu`, first add its location to the PATH variable
 e.g. for V8
 
 ```bash
@@ -39,7 +39,7 @@ The following shows how to run tests for a specific library
 ```
 
 ### Running tests using different JavaScript engines
-It's possible to set a JavaScript engine explicitly by adding `/p:JSEngine` property: 
+It's possible to set a JavaScript engine explicitly by adding `/p:JSEngine` property:
 
 ```
 ./dotnet.sh build /t:Test src/libraries/System.AppContext/tests /p:TargetOS=Browser /p:TargetArchitecture=wasm /p:Configuration=Release /p:JSEngine=SpiderMonkey
diff --git a/docs/workflow/testing/mono/testing.md b/docs/workflow/testing/mono/testing.md
index e710b77e00b4a..1bffa1de3389b 100644
--- a/docs/workflow/testing/mono/testing.md
+++ b/docs/workflow/testing/mono/testing.md
@@ -1,10 +1,10 @@
 # Running Tests using Mono Runtime
 
 ## Running Runtime Tests
-We currently only support running tests against coreclr. There are additional mono runtime tests in mono/mono, but they 
+We currently only support running tests against coreclr. There are additional mono runtime tests in mono/mono, but they
 have not been moved over yet. Simply run the following command:
 
-``` 
+```
 dotnet build /t:RunCoreClrTests $(REPO_ROOT)/src/mono/mono.proj
 ```
 
@@ -36,8 +36,8 @@ dotnet build /t:Test /p:RuntimeFlavor=mono
 ```
 
 # Patching Local dotnet (.dotnet-mono)
-Another way to test mono out is by 'patching' a local dotnet with our runtime bits. This is a good way to write simple 
-test programs and get a glimpse of how mono will work with the dotnet tooling. 
+Another way to test mono out is by 'patching' a local dotnet with our runtime bits. This is a good way to write simple
+test programs and get a glimpse of how mono will work with the dotnet tooling.
To generate a local .dotnet-mono, execute this command: @@ -51,4 +51,4 @@ You can then, for example, run our HelloWorld sample via: dotnet build -c Release $(REPO_ROOT)/src/mono/netcore/sample/HelloWorld MONO_ENV_OPTIONS="" COMPlus_DebugWriteToStdErr=1 \ $(REPO_ROOT)/.dotnet-mono/dotnet $(REPO_ROOT)/src/mono/netcore/sample/HelloWorld/bin/HelloWorld.dll -``` \ No newline at end of file +``` diff --git a/docs/workflow/testing/visualstudio.md b/docs/workflow/testing/visualstudio.md index 106f069f7df78..b519aa3a400a9 100644 --- a/docs/workflow/testing/visualstudio.md +++ b/docs/workflow/testing/visualstudio.md @@ -1,18 +1,18 @@ # Working in dotnet/runtime using Visual Studio -Visual Studio is a great tool to use when working in the dotnet/runtime repo. +Visual Studio is a great tool to use when working in the dotnet/runtime repo. Almost all its features should work well, but there are a few special considerations to bear in mind: -## Test Explorer +## Test Explorer You can run tests from the Visual Studio Test Explorer, but there are a few settings you need: - Enable `Auto detect runsettings Files` (`Test Explorer window -> Settings button -> Options`). Test parameters (like which `dotnet` host to use) are persisted in an auto-generated .runsettings file, and it's important that Visual Studio knows to use it. - Set `Processor Architecture for AnyCPU project` to `auto` (`Test Explorer window -> Settings button`). -- Consider whether to disable `Discover tests in real time from C# and Visual Basic .NET source files` (`Test explorer window -> Settings button -> Options`). +- Consider whether to disable `Discover tests in real time from C# and Visual Basic .NET source files` (`Test explorer window -> Settings button -> Options`). - You may want it enabled if you're actively writing new tests and want them to show up in Test Explorer without building first. - You may want it disabled if you're mostly running existing tests, and some of them have conditional attributes. Many of our unit tests have attributes, like `[SkipOnTargetFramework]`, to indicate that they're only valid in certain configurations. Because the real-time discovery feature does not currently recognize these attributes the tests will show up in Test Explorer as well, and fail or possibly hang when you try to run them. -- Consider whether to enable `Run tests in Parallel` (`Test Explorer window -> Settings button`). +- Consider whether to enable `Run tests in Parallel` (`Test Explorer window -> Settings button`). - You may want it enabled if some of the unit tests you're working with run slowly or there's many of them. - You may want it disabled if you want to simplify debugging or viewing debug output. diff --git a/eng/docker/Readme.md b/eng/docker/Readme.md index 0f73be1740e77..d2482bb08bf09 100644 --- a/eng/docker/Readme.md +++ b/eng/docker/Readme.md @@ -5,7 +5,7 @@ Provides reusable docker build infrastructure for the dotnet/runtime repo. ## libraries-sdk Dockerfiles The `libraries-sdk` Dockerfiles can be used to build dotnet sdk docker images -that contain the current libraries built from source. +that contain the current libraries built from source. These images can be used to build dockerized dotnet services that target the current libraries. Currently, debian and windows nanoserver sdk's are supported. 
diff --git a/eng/pipelines/coreclr/readme.md b/eng/pipelines/coreclr/readme.md index 71c391dcc9088..0969c41af2144 100644 --- a/eng/pipelines/coreclr/readme.md +++ b/eng/pipelines/coreclr/readme.md @@ -56,4 +56,4 @@ internal.yml -> platform-matrix.yml -------> build-job.yml -------> xplat-job.ym | (passed-in jobTemplate) | (arcade) \------> test-job.yml ------/ \------> format-job.yml ----/ -``` \ No newline at end of file +``` diff --git a/src/coreclr/src/dlls/mscoree/coreclr/README.md b/src/coreclr/src/dlls/mscoree/coreclr/README.md index 0e291a87a3d4d..b4f3e6f8845d2 100644 --- a/src/coreclr/src/dlls/mscoree/coreclr/README.md +++ b/src/coreclr/src/dlls/mscoree/coreclr/README.md @@ -4,4 +4,4 @@ but if that changes we can always create a little nicer tooling for it. dump\_helper\_resource.bin is used to populate the DUMP\_HELPER resource inside coreclr.dll on Windows. When an application crashes, Windows MinidumpWriteDump is planning to scan modules looking for this resource. The content of the resource is expected to be the name of a dll in the same folder, encoded in UTF8, null terminated, that implements the CLRDataCreateInterface function. For OS security purposes MinidumpWriteDump will do an authenticode signing check before loading the indicated binary, however if your build isn't -signed you can get around this limitation by registering it at HKLM\Software\Microsoft\WindowsNT\CurrentVersion\MiniDumpAuxilliaryDlls. \ No newline at end of file +signed you can get around this limitation by registering it at HKLM\Software\Microsoft\WindowsNT\CurrentVersion\MiniDumpAuxilliaryDlls. diff --git a/src/coreclr/src/inc/readme.md b/src/coreclr/src/inc/readme.md index 1e7754ea5e07b..e8f3405859e0f 100644 --- a/src/coreclr/src/inc/readme.md +++ b/src/coreclr/src/inc/readme.md @@ -9,4 +9,4 @@ for midl.exe which did that conversion so we work around the issue by doing: - If needed, adjust any of the .cpp files in src\pal\prebuilt\idl\ by hand, using the corresponding artifacts\obj\Windows_NT.x64.Debug\src\inc\idls_out\*_i.c as a guide. Typically this is just adding MIDL_DEFINE_GUID(...) for any new classes/interfaces that have been added to the idl file. -Include these src changes with the remainder of your work when you submit a PR. \ No newline at end of file +Include these src changes with the remainder of your work when you submit a PR. diff --git a/src/coreclr/src/pal/src/libunwind/README.md b/src/coreclr/src/pal/src/libunwind/README.md index 100b93820ade4..e845566c06f9b 100644 --- a/src/coreclr/src/pal/src/libunwind/README.md +++ b/src/coreclr/src/pal/src/libunwind/README.md @@ -1 +1 @@ -README \ No newline at end of file +README diff --git a/src/coreclr/src/tools/ILVerification/README.md b/src/coreclr/src/tools/ILVerification/README.md index 2c3277a23a10b..48ed8724f6640 100644 --- a/src/coreclr/src/tools/ILVerification/README.md +++ b/src/coreclr/src/tools/ILVerification/README.md @@ -1,3 +1,3 @@ # ILVerification -The ILVerification library is part of the ILVerify project. See details under [src/coreclr/src/tools/ILVerify](../ILVerify). \ No newline at end of file +The ILVerification library is part of the ILVerify project. See details under [src/coreclr/src/tools/ILVerify](../ILVerify). 
diff --git a/src/coreclr/src/tools/dotnet-pgo/README.md b/src/coreclr/src/tools/dotnet-pgo/README.md index 60b7101ad0dd1..a377bf1e4e23e 100644 --- a/src/coreclr/src/tools/dotnet-pgo/README.md +++ b/src/coreclr/src/tools/dotnet-pgo/README.md @@ -32,7 +32,7 @@ Note, this tool requires MethodDetails events which are produced by the .NET 5.0 ``` "dotnet trace collect -p 73060 --providers Microsoft-Windows-DotNETRuntime:0x6000080018:5" ``` - + - Capture events from process 73060 where we capture only JIT events using EventPipe tracing ``` "dotnet trace collect -p 73060 --providers Microsoft-Windows-DotNETRuntime:0x4000080018:5" diff --git a/src/coreclr/src/tools/dotnet-pgo/dotnet-pgo-experiment.md b/src/coreclr/src/tools/dotnet-pgo/dotnet-pgo-experiment.md index c47d9d227b404..14f9c63bbe1c0 100644 --- a/src/coreclr/src/tools/dotnet-pgo/dotnet-pgo-experiment.md +++ b/src/coreclr/src/tools/dotnet-pgo/dotnet-pgo-experiment.md @@ -1,7 +1,7 @@ # Experiments towards a Profile Data pipeline for .NET ----- -The .NET Runtime has a long history of providing instrumentation based profile guided optimization -for use internally at Microsoft, and for scenarios involving extremely high value customers. To +The .NET Runtime has a long history of providing instrumentation based profile guided optimization +for use internally at Microsoft, and for scenarios involving extremely high value customers. To this end the team built the IBC (instrumented block count) infrastructure into the runtime/ngen, and IBCMerge as a tool for manipulating .ibc files. Over the last few years, the structure of these technologies and tools has shown that they are not ideal for customer use or even internal use, and @@ -33,19 +33,19 @@ Profile guided optimization in .NET is used to provide benefits for 3 major conc Startup time for an application is primarily improved by avoiding the use of the JIT by ahead of time compiling methods in the application. In addition a profile can allow determination of which methods are hot vs cold, and group methods commonly used together with others. This has been the primary use -of pgo in .NET historically. +of pgo in .NET historically. Pgo is used to address size on disk concerns of R2R binaries where the default R2R strategy is too aggressive and produces binaries that are excessively large. The idea in that case is to only generate the functions specifically referenced in some profile instead of every method the heuristic indicates may be interesting. -Application throughput performance has historically been the primary use of pgo data for C++ compilers. +Application throughput performance has historically been the primary use of pgo data for C++ compilers. .NET has history with the use of instrumented per block counts, but this data is not generally processed in an effective manner by the JIT. This proposal aims to revitalize efforts to make good use of profile guided data to improve code quality. Over time, it is expected that not only will profile data be used at build time, but that it will also be used to do runtime profile instrumentation. - + # Proposal Contents Profile guided optimization is a combination of effort across a swath of components. @@ -59,7 +59,7 @@ And there are a series of components that need to be modified 2. Instrumenting jit (clrjit) 3. Trace processing tool (dotnet-pgo) 4. AOT compilation tool (crossgen2) -6. Consuming runtime (coreclr) +6. Consuming runtime (coreclr) 7. 
Diagnostic tools (r2rdump, dotnet-pgo)
 
## Conceptual model of `InstrumentationData`
@@ -68,7 +68,7 @@ statically, and instead is determined through instrumentation of the code. The f
 is expected to be defined by the JIT team, and be specific to the probes inserted, and may very well
 change over time. It is composed of two sections
 
-1. The descriptor used to describe the probes, this is fixed at JIT time, and describes the meaning of the data. 
+1. The descriptor used to describe the probes, this is fixed at JIT time, and describes the meaning of the data.
 2. The data gathered as counts, and values that will be used to perform further optimization.
 
 Both of these data blocks are able to contain type and method data, where the concept is that it is
@@ -78,7 +78,7 @@ but there are also plausible cases for gathering each kind of data in both secti
 be made general to support both. Instrumentation Data shall have a version number independent of the
 general R2R versioning scheme. The intention is for this form of `InstrumentationData` to become useable
 for both out of line instrumentation as described in this document, as well as only tiered
-compilation rejit scenarios with in process profiling. 
+compilation rejit scenarios with in process profiling.
 
 ## Trace data format
 Runtime instrumentation will be accomplished through 4 events, 2 of which are already existing
@@ -149,7 +149,7 @@ Profile data shall be encoded into the R2R FileFormat in a new section named `RE
 This section shall hold a version number, and a single `NativeHashtable` that contains a mapping from
 type/method to the pair of Desc and Data. TODO define how Desc and Data are encoded. The intention is
 to store exactly the same data as is stored in the PGO data file, except that the instrumentation data version must be the same for
-all data chunks. 
+all data chunks.
 
 ## Instrumenting Runtime
 The runtime shall be responsible for choosing when to execute instrumentation, allocating the tracing buffers
@@ -197,7 +197,7 @@ data that may be embedded into the R2R file format for possible consumption by t
 
 ## Trace processing tool
 The trace processing tool is responsible for reading the trace files as produced by perfview/dotnet trace, and
 producing .MIBC files. The process should be a straightforward format translation for instrumentation data. The
-`FunctionTouchOrder` and existence of the method shall be based on the `JitStarted` and `R2EEntryPoint` events. 
+`FunctionTouchOrder` and existence of the method shall be based on the `JitStarted` and `R2EEntryPoint` events.
 
 ## AOT Compilation tool
 AOT compilation shall use the profile guided data in several ways.
@@ -210,7 +210,7 @@ data for the method being compiled, and for both the uninstantiated method and i
 as are present. The jit is responsible for merging these multiple data sources.
 
 In addition the JIT may optionally choose to generate a profile guided data block for association with the precompiled
-code for use in re-jit scenarios, and information about related method code layout for the code, and optionally a 
+code for use in re-jit scenarios, and information about related method code layout for the code, and optionally a
 portion of the function body which is to be placed into a cold code section. The intention here is to allow
 some algorithm such as Pettis-Hansen or a more modern variant (eg https://research.fb.com/wp-content/uploads/2017/01/cgo2017-hfsort-final1.pdf)
 to be used to optimize code layout.
@@ -219,7 +219,7 @@
If present in an R2R file, when a method is rejitted, the runtime shall provide a means for the jit to see instrumentation
 data from either previous compiles in process, and/or from the R2R file. This shall provide a means for the JIT to
 choose whether or not the method should be recompiled, or possibly to inform it about optimization opportunities that are
-too expensive to compute at jit time, but could be computed by the AOT compiler, or other such ideas. 
+too expensive to compute at jit time, but could be computed by the AOT compiler, or other such ideas.
 
 As a means of doing this, options such as the following will be given to the jit to provide custom behavior.
 1. Ignore the profile data and rejit.
@@ -235,4 +235,4 @@ would be to use this as a means for adaptive or speculative optimization.
 The tools r2rdump and dotnet-pgo shall provide a means for dumping their inputs. For most forms of data this is
 fairly straightforward, but for `InstrumentationData`, there shall be a common dump tool written in managed code
 that can provide a human readable dump of the data. r2rdump, dotnet-pgo, and possibly sos will all be able to share
-this codebase for examination of the data structures in r2r files, traces, and runtime environments respectively. 
+this codebase for examination of the data structures in r2r files, traces, and runtime environments respectively.
diff --git a/src/installer/managed/Microsoft.NET.HostModel/README.md b/src/installer/managed/Microsoft.NET.HostModel/README.md
index 2a0d1339907ee..f473c6c8843a1 100644
--- a/src/installer/managed/Microsoft.NET.HostModel/README.md
+++ b/src/installer/managed/Microsoft.NET.HostModel/README.md
@@ -11,9 +11,9 @@ HostModel is a library used by the [SDK](https://github.com/dotnet/sdk) to perfo
 The HostModel library is in the Runtime repo because:
 * The implementations of the host and HostModel are closely related, which facilitates easy development, update, and testing.
-* Separating the HostModel implementation from the SDK repo aligns with code ownership, and facilitates maintenance. 
+* Separating the HostModel implementation from the SDK repo aligns with code ownership, and facilitates maintenance.
 
 The build targets/tasks that use the HostModel library are in the SDK repo because:
-* This facilitates the MSBuild tasks to be multi-targeted. 
-* It helps generate localized error messages, since SDK repo has the localization infrastructure. 
+* This facilitates the MSBuild tasks to be multi-targeted.
+* It helps generate localized error messages, since SDK repo has the localization infrastructure.
diff --git a/src/installer/tests/scripts/linux-test/README.md b/src/installer/tests/scripts/linux-test/README.md
index 9492e3c71958a..d753785157300 100644
--- a/src/installer/tests/scripts/linux-test/README.md
+++ b/src/installer/tests/scripts/linux-test/README.md
@@ -2,33 +2,33 @@ This project has the purpose to automate verification test for .NET Runtime and
 To have this test running on your local machine, do the following steps:
 1. Download VerificationTestOnDocker.sh, RuntimeInstallation.sh, SdkInstallation.sh, images.txt in the same folder
-2. Update images.txt with the image names you want to run the installation test on 
+2. Update images.txt with the image names you want to run the installation test on
 3.
Run $ ./VerificationTestOnDocker.sh \ \ \
 
 The options are:
-* \ 
- * runtime - verification test for .NET Runtime Linux packages 
+* \
+ * runtime - verification test for .NET Runtime Linux packages
 * sdk - verification test for .NET SDK Linux packages
-* \ 
- * latest - install the latest available .NET package from our master repository 
- * \ - install the package corresponding to this version number 
-* \ 
+* \
+ * latest - install the latest available .NET package from our master repository
+ * \ - install the package corresponding to this version number
+* \
 * install - verification test for install
- * install uninstall - verification test for install and uninstall 
+ * install uninstall - verification test for install and uninstall
 
-The script VerificationTestOnDocker.sh is responsible for reading a file (images.txt) containing docker images and running a docker container for each image specified in that file. Inside each container, the script to install .NET Runtime (RuntimeInstallation.sh) or .NET SDK (SdkInstallation.sh) will be executed. 
+The script VerificationTestOnDocker.sh is responsible for reading a file (images.txt) containing docker images and running a docker container for each image specified in that file. Inside each container, the script to install .NET Runtime (RuntimeInstallation.sh) or .NET SDK (SdkInstallation.sh) will be executed.
 
 Both scripts RuntimeInstallation.sh and SdkInstallation.sh automatically identify what distro and version is running on the current machine and can install and uninstall the latest version of .NET Runtime/Sdk packages corresponding to that distro & version. The installation's stdout for all containers is redirected to a single file (logfile.txt). At the end of this file (logfile.txt) the results of the test are also displayed, printing the result 'failed' or 'passed' for each distro and version.
 
 .NET packages are downloaded from the blob https://dotnetcli.blob.core.windows.net/dotnet
 
-This project takes into account: 
- -> dotnet-sdk depends on dotnet-runtime and aspnet-runtime 
- -> aspnet-runtime depends on dotnet-runtime (can be different from what dotnet-sdk depends on) 
- -> dotnet-runtime-deps depends on system packages 
- -> .NET runtime carries: dotnet-runtime-deps, dotnet-host, dotnet-hostfxr and dotnet-runtime. 
+This project takes into account:
+ -> dotnet-sdk depends on dotnet-runtime and aspnet-runtime
+ -> aspnet-runtime depends on dotnet-runtime (can be different from what dotnet-sdk depends on)
+ -> dotnet-runtime-deps depends on system packages
+ -> .NET runtime carries: dotnet-runtime-deps, dotnet-host, dotnet-hostfxr and dotnet-runtime.
 
 Changes to how dotnet runtime packages are structured or modifications to the package dependencies may affect the verification test result.
@@ -37,13 +37,13 @@ This verification test depends on docker images and the test result can be a fal The script allows automated test only for the following distro & version: -| Distro | Version | +| Distro | Version | |--------|---------| -| Ubuntu | 14.04, 16.04, 18.04 | -| Debian | 8, 9 | +| Ubuntu | 14.04, 16.04, 18.04 | +| Debian | 8, 9 | | Centos | 7 | | Fedora | 27 | | OpenSUSE | 42 | | Oracle Linux | 7 | | RHEL | 7 | -| SLES | 12 | +| SLES | 12 | diff --git a/src/libraries/Common/src/System/Net/Internals/readme.md b/src/libraries/Common/src/System/Net/Internals/readme.md index e53ede142a227..58353735bfa45 100644 --- a/src/libraries/Common/src/System/Net/Internals/readme.md +++ b/src/libraries/Common/src/System/Net/Internals/readme.md @@ -1,4 +1,4 @@ -Contracts such as NameResolution and Sockets require internal access to Primitive types. Binary copies of these types have been made within the System.Net.Internals namespace using #ifdef pragmas (source code is reused). +Contracts such as NameResolution and Sockets require internal access to Primitive types. Binary copies of these types have been made within the System.Net.Internals namespace using #ifdef pragmas (source code is reused). An adaptation layer between .Internals and public types exists within the Extensions classes. diff --git a/src/libraries/Common/tests/System/Net/EnterpriseTests/setup/README.md b/src/libraries/Common/tests/System/Net/EnterpriseTests/setup/README.md index 0ce6f23b28256..24a1db60f5a21 100644 --- a/src/libraries/Common/tests/System/Net/EnterpriseTests/setup/README.md +++ b/src/libraries/Common/tests/System/Net/EnterpriseTests/setup/README.md @@ -1,7 +1,7 @@ # Enterprise Scenario Testing ## What Are Enterprise Scenarios? -There are many definitions for enterprise scenarios. But generally in terms of how .NET Core networking APIs are used, enterprise scenarios are those networking scenarios that are fundamentally used by businesses (a.k.a enterprises) compared with consumers. As such, they use networking components, protocols, and security authentication mechanisms that are not used by most consumers using their home networking and Internet connections. +There are many definitions for enterprise scenarios. But generally in terms of how .NET Core networking APIs are used, enterprise scenarios are those networking scenarios that are fundamentally used by businesses (a.k.a enterprises) compared with consumers. As such, they use networking components, protocols, and security authentication mechanisms that are not used by most consumers using their home networking and Internet connections. ## Networking Components of Enterprise Scenarios Enterprise scenarios typically see the following kinds of components/protocols/security: diff --git a/src/libraries/Common/tests/System/Net/Prerequisites/README.md b/src/libraries/Common/tests/System/Net/Prerequisites/README.md index cd1e217b9d1f0..7dbb09241637d 100644 --- a/src/libraries/Common/tests/System/Net/Prerequisites/README.md +++ b/src/libraries/Common/tests/System/Net/Prerequisites/README.md @@ -10,7 +10,7 @@ Contains source files for the networking test servers in Azure or a private IIS Note: the `config.ps1` file has been added to .gitignore to prevent it being updated in the master branch. -### Build the server applications +### Build the server applications Prepare the $DOTNET_TEST_NET_CLIENT_Machine as any Dev station following the instructions at https://github.com/dotnet/runtime/blob/master/docs/workflow/requirements/windows-requirements.md. 
Ensure that you can build and test CoreFX on this machine. In addition, you will also need to install the _Azure development_ workload for Visual Studio 2017. @@ -30,7 +30,7 @@ You should now find a folder named `IISApplications` within the Deployment folde Skip this step if previously completed and all machines are already part of a domain to which you have Administrator rights. This will join all machines to a test Active Directory and enable Windows Remoting. -1. Copy the Deployment folder to each of the machines. +1. Copy the Deployment folder to each of the machines. 2. Run the .\setup.ps1 script on the machine designated to become the Domain Controller. Once complete, the machine will reboot. 3. Run the .\setup.ps1 script on all other domain joined machines. Once complete, the machines will reboot. diff --git a/src/libraries/Microsoft.CSharp/README.md b/src/libraries/Microsoft.CSharp/README.md index 1b8c521c74813..bb8dc6d468968 100644 --- a/src/libraries/Microsoft.CSharp/README.md +++ b/src/libraries/Microsoft.CSharp/README.md @@ -5,4 +5,4 @@ The library is effectively archived. The library and supporting language features are mature and no longer evolving, and the risk of code change likely exceeds the benefit. We will consider changes that address significant bugs or regressions, or changes that are necessary to continue shipping the binaries. -Other changes will be rejected. \ No newline at end of file +Other changes will be rejected. diff --git a/src/libraries/Microsoft.Extensions.DependencyInjection/README.md b/src/libraries/Microsoft.Extensions.DependencyInjection/README.md index 8d20dd4d12705..2b0d2330ff95d 100644 --- a/src/libraries/Microsoft.Extensions.DependencyInjection/README.md +++ b/src/libraries/Microsoft.Extensions.DependencyInjection/README.md @@ -12,4 +12,4 @@ Contains common DI abstractions that ASP.NET Core and Entity Framework Core use. * [**LightInject**](https://github.com/seesharper/LightInject.Microsoft.DependencyInjection) * [**StructureMap**](https://github.com/structuremap/StructureMap.Microsoft.DependencyInjection) * [**Stashbox**](https://github.com/z4kn4fein/stashbox-extensions-dependencyinjection) -* [**Unity**](https://www.nuget.org/packages/Unity.Microsoft.DependencyInjection/) \ No newline at end of file +* [**Unity**](https://www.nuget.org/packages/Unity.Microsoft.DependencyInjection/) diff --git a/src/libraries/Microsoft.VisualBasic.Core/README.md b/src/libraries/Microsoft.VisualBasic.Core/README.md index 46a6cef576200..0f11eaae8dd8b 100644 --- a/src/libraries/Microsoft.VisualBasic.Core/README.md +++ b/src/libraries/Microsoft.VisualBasic.Core/README.md @@ -5,4 +5,4 @@ The library is effectively archived. The library and supporting language features are mature and no longer evolving, and the risk of code change likely exceeds the benefit. We will consider changes that address significant bugs or regressions, or changes that are necessary to continue shipping the binaries. -Other changes will be rejected. \ No newline at end of file +Other changes will be rejected. diff --git a/src/libraries/System.ComponentModel.Composition/mef_guide/README.md b/src/libraries/System.ComponentModel.Composition/mef_guide/README.md index b48d1c71b5f9f..c8c59ad520921 100644 --- a/src/libraries/System.ComponentModel.Composition/mef_guide/README.md +++ b/src/libraries/System.ComponentModel.Composition/mef_guide/README.md @@ -1,6 +1,6 @@ # MEF1 vs. MEF2 -* MEF1 is used to compose different pieces of code together for dynamic extensions. 
It has attributes like `Export` and `Import`, hooking up import classes to exports. MEF has a catalog that keeps track of exports and looks at assembly, directory, and type catalogs to discover exports. The container instantiates objects and satisfies imports. We do a series of object instantiations, catalog lookups, and finding exports to get the model that is handed out from a container. 
+* MEF1 is used to compose different pieces of code together for dynamic extensions. It has attributes like `Export` and `Import`, hooking up import classes to exports. MEF has a catalog that keeps track of exports and looks at assembly, directory, and type catalogs to discover exports. The container instantiates objects and satisfies imports. We do a series of object instantiations, catalog lookups, and finding exports to get the model that is handed out from a container.
 
 * MEF2 is purely type-based and in order to compose the graph it creates a catalog at compile-time. It builds graphs using expression trees. The expression trees in MEF2 fundamentally cannot have cycles in them. This is a limitation you get when you go for MEF2 that is an optimized version of MEF1. This was done to improve performance and to have the graph known already at compile-time.
diff --git a/src/libraries/System.Console/tests/ManualTests/Readme.md b/src/libraries/System.Console/tests/ManualTests/Readme.md
index 0b6819734d407..6c56bce87840a 100644
--- a/src/libraries/System.Console/tests/ManualTests/Readme.md
+++ b/src/libraries/System.Console/tests/ManualTests/Readme.md
@@ -10,7 +10,7 @@ To run the suite, follow these steps:
 
 ## Instructions for Windows testers
 
-VsTest on Windows redirects console input, so in order to properly execute the manual tests, 
+VsTest on Windows redirects console input, so in order to properly execute the manual tests,
 `xunit-console` must be invoked directly. To do this, first run
 
 ```
diff --git a/src/libraries/System.Data.Odbc/src/DatabaseSetupInstructions.md b/src/libraries/System.Data.Odbc/src/DatabaseSetupInstructions.md
index 72a2dc7a503ba..4c3b16f43fc53 100644
--- a/src/libraries/System.Data.Odbc/src/DatabaseSetupInstructions.md
+++ b/src/libraries/System.Data.Odbc/src/DatabaseSetupInstructions.md
@@ -1,4 +1,4 @@
-# Instructions on how to set up the database 
+# Instructions on how to set up the database
 
 ## In Fedora 24 container:
 - `docker ps` shows _id of existing Fedora 24 container
@@ -25,7 +25,7 @@ Get the tag name from https://hub.docker.com/r/microsoft/dotnet-buildtools-prere
 ## Notes on commands used in Linux 14.04
 This section describes the process to install unixODBC libraries and SQLite/MSSQL driver for Ubuntu 14.04.
-- `sudo su` +- `sudo su` - `curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add -` - `curl https://packages.microsoft.com/config/ubuntu/14.04/prod.list > /etc/apt/sources.list.d/mssql-release.list` - `sudo apt-get update` @@ -106,7 +106,7 @@ Setup=/usr/local/lib/libsqlite3odbc.so Threading=4 ``` -## Notes on commands used in Mac +## Notes on commands used in Mac - `gunzip unixODBC-2.3.4.tar.gz` download unixodbc - `tar xvf unixODBC-2.3.4.tar` - `cd unix...` diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md index fd1a57fae0f6b..cacc8876c261e 100644 --- a/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md +++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md @@ -5,7 +5,7 @@ This document describes Activity, a class that allows storing and accessing diag This document provides Activity architecture [overview](#overview) and [usage](#activity-usage). # Overview -When application starts processing an operation e.g. HTTP request or task from queue, it creates an `Activity` to track it through the system as the request is processed. Examples of context stored in `Activity` could be HTTP request path, method, user-agent, or correlation id: all the details important to be logged along with every trace. +When application starts processing an operation e.g. HTTP request or task from queue, it creates an `Activity` to track it through the system as the request is processed. Examples of context stored in `Activity` could be HTTP request path, method, user-agent, or correlation id: all the details important to be logged along with every trace. When application calls external dependency to complete an operation, it may need to pass some of the context (e.g. correlation id) along with dependency call to be able to correlate logs from multiple services. `Activity` provides [Tags](#tags) to represent context which is needed for logging only and [Baggage](#baggage) to represent context which needs to be propagated to external dependencies. It has other properties described in [Activity Reference](#activity-reference). @@ -63,18 +63,18 @@ When that activity is started, it gets an [Id](id) and [Parent](parent). { var baggageItem = NameValueHeaderValue.Parse(pair); activity.AddBaggage(baggageItem.Key, baggageItem.Value); - } + } httpListener.StartActivity(activity, new {context}); try { //process request ... } finally { //stop activity httpListener.StopActivity(activity, new {context} ); - } + } } } ``` -**Note** +**Note** - instead of Activity.Start() and Stop() methods, in above example we call `DiagnosticSource.StartActivity()` and `StopActivity()` methods that write events to DiagnosticSource. - Activity creation is guarded with a call to `DiagnosticSource.IsEnabled` thus eliminating any unnecessary performance impact if no-one is listening to this `DiagnosticSource`. 
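The guarded start/stop pattern in the note above condenses into a small, self-contained sketch. The listener name (`MyComponent`), event name, tag, and baggage values below are illustrative assumptions, not names taken from the guide:

```c#
using System.Diagnostics;

public class RequestHandler
{
    // "MyComponent" and the event name are placeholders for illustration.
    private static readonly DiagnosticSource s_listener = new DiagnosticListener("MyComponent");

    public void ProcessRequest(object context)
    {
        Activity activity = null;
        // Create and start the Activity only when someone is subscribed,
        // so untraced requests pay almost nothing.
        if (s_listener.IsEnabled("MyComponent.HttpRequestIn"))
        {
            activity = new Activity("MyComponent.HttpRequestIn");
            activity.AddTag("path", "/home/index");      // logged, not propagated
            activity.AddBaggage("CorrelationId", "123"); // propagated to children
            s_listener.StartActivity(activity, new { context });
        }
        try
        {
            // ... process the request ...
        }
        finally
        {
            if (activity != null)
                s_listener.StopActivity(activity, new { context });
        }
    }
}
```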
@@ -96,8 +96,8 @@ When an application makes an outbound call, for example to an external web-servi } finally { //stop activity httpListener.StopActivity(activity, new {request} ); - } - } + } + } } ``` @@ -144,7 +144,7 @@ Note that in the [Incoming Request Sample](#starting-and-stopping-activity), we ["StartTime"] = activity.StartTimeUtc, } //log tags and baggage if needed - //...send document to log storage + //...send document to log storage } public void LogActivityStop() @@ -157,7 +157,7 @@ Note that in the [Incoming Request Sample](#starting-and-stopping-activity), we ["ParentId"] = activity.ParentId, ["Duration"] = activity.Duration }; - + //warning: Baggage or Tag could have duplicated keys! foreach (var kv in activity.Tags) document[kv.Key] = kv.Value; @@ -190,32 +190,32 @@ It's crucial that Activity Id is logged along with every event. ParentId, Tags a ## Activity Id The main goal of Activity is to ensure telemetry events could be correlated in order to trace user requests and Activity.Id is the key part of this functionality. -Applications start Activity to represent logical piece of work to be done; one Activity may be started as a child of another Activity. +Applications start Activity to represent logical piece of work to be done; one Activity may be started as a child of another Activity. The whole operation may be represented as a tree of Activities. All operations done by the distributed system may be represented as a forest of Activities trees. Id uniquely identifies Activity in the forest. It has an hierarchical structure to efficiently describe the operation as Activity tree. -Activity.Id serves as hierarchical Request-Id in terms of [HTTP Correlation Protocol](HttpCorrelationProtocol.md) +Activity.Id serves as hierarchical Request-Id in terms of [HTTP Correlation Protocol](HttpCorrelationProtocol.md) ### Id Format `|root-id.id1_id2.id3_id4.` -e.g. +e.g. `|a000b421-5d183ab6.1.8e2d4c28_1.` -It starts with '|' followed by [root-id](#root-id) followed by '.' and small identifiers of local Activities, separated by '.' or '_'. +It starts with '|' followed by [root-id](#root-id) followed by '.' and small identifiers of local Activities, separated by '.' or '_'. [Root-id](#root-id) identifies the whole operation and 'Id' identifies particular Activity involved in operation processing. -'|' indicates Id has hierarchical structure, which is useful information for logging system. +'|' indicates Id has hierarchical structure, which is useful information for logging system. * Id is 1024 bytes or shorter -* Id consist of [Base64](https://en.wikipedia.org/wiki/Base64), '-' (hyphen), '.' (dot), '_' (underscore) and '#' (pound) characters. +* Id consist of [Base64](https://en.wikipedia.org/wiki/Base64), '-' (hyphen), '.' (dot), '_' (underscore) and '#' (pound) characters. Where base64 and '-' are used in nodes and other characters delimit nodes. Id always ends with one of the delimiters. ### Root Id -When you start the first Activity for the operation, you may optionally provide root-id through `Activity.SetParentId(string)` API. +When you start the first Activity for the operation, you may optionally provide root-id through `Activity.SetParentId(string)` API. If you don't provide it, Activity will generate root-id: e.g. 
`a000b421-5d183ab6`
@@ -223,7 +223,7 @@ If don't have ParentId from external process and want to generate one, keep in m
 * MUST be sufficiently large to identify single operation in entire system: use 64(or 128) bit random number or Guid
 * MUST contain only [Base64 characters](https://en.wikipedia.org/wiki/Base64) and '-' (dash)
 
-To get root id, use `Activity.RootId` property after providing ParentId or after starting Activity. 
+To get root id, use `Activity.RootId` property after providing ParentId or after starting Activity.
 
 ### Child Activities and Parent Id
 #### Internal Parent
@@ -234,7 +234,7 @@ Activity generates Id in following format `parent-id.local-id.`.
 #### External Parent
 Activities whose parent is external to the process should be assigned a Parent-Id (before start) with the `Activity.SetParentId(string)` API.
-Activity would use another suffix for Id, as described in [Root Id](#root-id) section and will append '_' delimiter that indicates 
+Activity would use another suffix for Id, as described in [Root Id](#root-id) section and will append '_' delimiter that indicates
 that parent came from the external process.
 
 If external ParentId does not start with '|', Activity will prepend its own Id with '|' and will keep ParentId intact.
@@ -249,7 +249,7 @@ and '#' delimiter that indicates overflow: `.local-id#`
 
 # Reference
-## Activity 
+## Activity
 ### Tags
 `IEnumerable<KeyValuePair<string, string>> Tags { get; }` - Represents information to be logged along with the activity. Good examples of tags are instance/machine name, incoming request HTTP method, path, user/user-agent, etc. Tags are **not passed** to child activities.
 Typical tag usage includes adding a few custom tags and enumeration through them to fill log event payload. Retrieving a tag by its key is not supported.
 
 ### Baggage
 `IEnumerable<KeyValuePair<string, string>> Baggage { get; }` - Represents information to be logged with the activity **and** passed to its children. Examples of baggage include correlation id, sampling and feature flags. Baggage is serialized and **passed along with external dependency requests**.
-Typical Baggage usage includes adding a few baggage properties and enumeration through them to fill log event payload. 
+Typical Baggage usage includes adding a few baggage properties and enumeration through them to fill log event payload.
 
 ### OperationName
 `string OperationName { get; }` - Coarsest name for an activity. This name must be set in the constructor.
- 
+
 ### StartTimeUtc
 `DateTime StartTimeUtc { get; private set; }` - DateTime in UTC (Greenwich Mean Time) when activity was started. If it's not already initialized, it will be set to DateTime.UtcNow in `Start`.
 
-### Duration 
+### Duration
 `TimeSpan Duration { get; private set; }` - Represents Activity duration if activity was stopped, TimeSpan.Zero otherwise.
 
 ### Id
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
index 24581fb012060..11f7b2f25bf24 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/DiagnosticSourceUsersGuide.md
@@ -164,10 +164,10 @@ Thus the event names only need to be unique within a component.
 
 * DO - use activities (see [Activity Users Guide](ActivityUserGuide.md)) for events that are marking
 the beginning and end of an interval of time.
The key value of Activities is that they indicate that they represent a DURATION, and they also track what 'caused' them (and thus
- logging systems can stitch together a 'causality graph'). 
+ logging systems can stitch together a 'causality graph').
 
-* DO - If for some reason you can't use Activities, and your events mark the start and stop of 
- an interval of time, use the 'Start' and 'Stop' suffixes on the events. 
+* DO - If for some reason you can't use Activities, and your events mark the start and stop of
+ an interval of time, use the 'Start' and 'Stop' suffixes on the events.
 
 ### Payloads
 
 * CONSIDER creating an explicit type for the payload. The main value for doing this is that the receiver
 can cast the received object to that type and immediately fetch fields (with anonymous types -
- reflection must be used to fetch fields). This is both easier to program and more efficient. 
- Thus in scenarios where there is likely high-volume filtering to be done by the logging listener, having 
- this type available to do the cast is valuable. Note that this type needs to be made public (since 
- the listener needs to see it), and should be under the namespace System.Diagnostics.DiagnosticSource.PayloadTypes. 
- Note that if there is doubt about the value DO NOT create an explicit type, as you CAN convert from 
+ reflection must be used to fetch fields). This is both easier to program and more efficient.
+ Thus in scenarios where there is likely high-volume filtering to be done by the logging listener, having
+ this type available to do the cast is valuable. Note that this type needs to be made public (since
+ the listener needs to see it), and should be under the namespace System.Diagnostics.DiagnosticSource.PayloadTypes.
+ Note that if there is doubt about the value DO NOT create an explicit type, as you CAN convert from
 an anonymous type to an explicit type compatibly in the future, but once you expose the payload type
- you must keep it forever. The payload type should simply have C# 'TYPE NAME {get; set; }' properties 
- (you can't use fields). You may add new properties as needed in the future. 
+ you must keep it forever. The payload type should simply have C# 'TYPE NAME {get; set; }' properties
+ (you can't use fields). You may add new properties as needed in the future.
 
 * CONSIDER in high volume cases (e.g. > 1K/sec) reusing the payload object instead of
- creating a new one each time the event is fired. This only works well if you already have locking 
- or exclusive objects where you can remember the payload for the 'next' event to send easily and 
+ creating a new one each time the event is fired. This only works well if you already have locking
+ or exclusive objects where you can remember the payload for the 'next' event to send easily and
 correctly (you are only saving an object allocation, which is not large).
 
 * CONSIDER - if you have an event that is so frequent that the performance of the logging is
@@ -256,7 +256,7 @@ A typical use of the `AllListeners` static property looks like this:
 
 // Here is where we put code to subscribe to the Listener.
 }
 });
- 
+
 // Typically you leave the listenerSubscription subscription active forever.
 // However when you no longer want your callback to be called, you can
 // call listenerSubscription.Dispose() to cancel your subscription to the IObservable.
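Returning to the payload guidance above, a minimal sketch of the "explicit payload type" option might look like the following; the source name, event name, and property set are invented for illustration:

```c#
using System.Diagnostics;

// A hypothetical explicit payload type: simple public { get; set; } properties,
// so a listener can cast the received object and read it without reflection.
public sealed class RequestStartPayload
{
    public string Url { get; set; }
    public long Timestamp { get; set; }
}

public static class MyComponentDiagnostics
{
    private static readonly DiagnosticSource s_source = new DiagnosticListener("MyComponent");

    public static void RequestStart(string url)
    {
        // Guard the Write so the payload object is only allocated when someone listens.
        if (s_source.IsEnabled("MyComponent.RequestStart"))
        {
            s_source.Write("MyComponent.RequestStart",
                new RequestStartPayload { Url = url, Timestamp = Stopwatch.GetTimestamp() });
        }
    }
}
```

A subscriber can then test `kv.Value is RequestStartPayload p` and read `p.Url` directly, which is exactly the high-volume filtering win the guideline describes.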
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/FlatRequestId.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/FlatRequestId.md
index 1f8165a8f93be..fdaccc7fffe1a 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/FlatRequestId.md
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/FlatRequestId.md
@@ -1,18 +1,18 @@
 # Note
 Starting with System.Diagnostics.DiagnosticSource 4.6.0 (that ships with .Net Core 3.0), we are moving towards the [W3C Trace-Context](https://www.w3.org/TR/trace-context/) standard. We still support Request-Id ([hierarchical](HierarchicalRequestId.md) version) and it is still the default format for `System.Diagnostics.Activity`.
-This specification for `Flat Request-Id` is **deprecated**. 
+This specification for `Flat Request-Id` is **deprecated**.
 There is no corresponding implementation in .NET and if you are looking into 'flat' correlation protocol - we recommend following [W3C Trace-Context](https://www.w3.org/TR/trace-context/).
 
 # Flat Request-Ids
 
-This document provides guidance for implementations of [HTTP Correlation Protocol](HttpCorrelationProtocol.md) without [Hierarchical Request-Id](HierarchicalRequestId.md) support or interoperability with services that do not support it. 
+This document provides guidance for implementations of [HTTP Correlation Protocol](HttpCorrelationProtocol.md) without [Hierarchical Request-Id](HierarchicalRequestId.md) support or interoperability with services that do not support it.
 
 We strongly recommend every implementation to support [Hierarchical Request-Id](HierarchicalRequestId.md) wherever possible. If an implementation does not support it, it still MUST ensure essential requirements are met:
 
 * `Request-Id` uniquely identifies every HTTP request involved in operation processing and MUST be generated for every incoming and outgoing request
 * `Correlation-Context` has `Id` property serving as single unique identifier of the whole operation and implementation MUST generate one if it is missing.
 
-It is important to log `Request-Id` received from the upstream service along with the incoming request. It ensures that parent-child relationships between requests are retained and the whole tree of the requests could be restored. 
+It is important to log `Request-Id` received from the upstream service along with the incoming request. It ensures that parent-child relationships between requests are retained and the whole tree of the requests could be restored.
 Therefore implementations MUST provide access to the 'parent' Request-Id for the logging system.
 
 [Root Request Id](HierarchicalRequestId.md#root-request-id-generation) requirements and generation considerations must be used for flat Request-Id
@@ -29,7 +29,7 @@ If implementation needs to add `Id` property to `Correlation-Context`:
 * MUST follow [Root Request Id Generation](HierarchicalRequestId.md#root-request-id-generation) rules otherwise
 
 ## Non-hierarchical Request-Id example
-1. A: service-a receives request 
+1. A: service-a receives request
 * scans through its headers and does not find Request-Id.
 * generates a new one: `abc`
 * adds extra property to CorrelationContext `Id=123`
@@ -72,7 +72,7 @@ Requirements listed [Request-Id](HttpCorrelationProtocol.md#request-id) help to
 
 Let's imagine service-a supports hierarchical Request-Id and service-b does not:
 
-1. A: service-a receives request 
+1. A: service-a receives request
 * scans through its headers and does not find `Request-Id`.
* generates a new one: `|Guid.`
 * logs event that operation was started along with `Request-Id: |Guid.`
@@ -82,7 +82,7 @@ Let's imagine service-a supports hierarchical Request-Id and service-b does not:
 * sends request to service-b
 3. B: service-b receives request
 * scans through its headers and finds `Request-Id: |Guid.1_`
- * generates a new Request-Id: `def` 
+ * generates a new Request-Id: `def`
 * does not see `Correlation-Context`. It parses parent Request-Id, extracts root node: `Guid` and adds `Id` property to `CorrelationContext : Id=abc`
 * logs event that operation was started
 * processes request and responds to service-a
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/HierarchicalRequestId.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/HierarchicalRequestId.md
index c9fbe7a912e01..353fc0055d758 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/HierarchicalRequestId.md
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/HierarchicalRequestId.md
@@ -8,22 +8,22 @@ Guids or big random number help to achieve it, but they require other identifier
 Hierarchical Request-Id looks like `|...` (e.g. `|9e74f0e5-efc4-41b5-86d1-3524a43bd891.bcec871c_1.`) and holds all information needed to trace whole operation and particular request. Root-id serves as common identifier for all requests involved in operation processing and local-ids represent internal activities (and requests) done within scope of this operation.
 
-Upstream service/client application may be instrumented with another tracing system, so implementation MAY have compatibility layer that parses another set of trace headers. 
+Upstream service/client application may be instrumented with another tracing system, so implementation MAY have compatibility layer that parses another set of trace headers.
 Therefore implementation SHOULD be tolerant to other formats of trace identifiers and do the best effort to keep `root-id` equivalent in particular tracing system.
 
 ### Formatting Hierarchical Request-Id
-If `Request-Id` was not provided from upstream service and implementation decides to trace the request, it MUST generate new `Request-Id` (see [Root Request Id Generation](#root-request-id-generation)) to represent incoming request. 
+If `Request-Id` was not provided from upstream service and implementation decides to trace the request, it MUST generate new `Request-Id` (see [Root Request Id Generation](#root-request-id-generation)) to represent incoming request.
 
-In heterogeneous environment implementations of this protocol with hierarchical `Request-Id` may interact with other services that do not implement this protocol, but still have notion of request Id. Implementation or logging system should be able to unambiguously identify if given `Request-Id` has hierarchical schema. 
+In heterogeneous environment implementations of this protocol with hierarchical `Request-Id` may interact with other services that do not implement this protocol, but still have notion of request Id. Implementation or logging system should be able to unambiguously identify if given `Request-Id` has hierarchical schema.
 
-Therefore every implementation which supports hierarchical structure MUST prepend "|" (vertical bar) to generated `Request-Id`. 
+Therefore every implementation which supports hierarchical structure MUST prepend "|" (vertical bar) to generated `Request-Id`.
 It also MUST append "." (dot) to the end of generated Request-Id to unambiguously mark end of it (e.g.
search for `|123` may return `|1234`, but search for `|123.` would be exact) #### Root Request Id Generation Root Request-Id is the top most Request-Id generated by the first instrumented service. In a hierarchical Request-Id, it is a root node and common for all requests involved in operation processing. It MUST be unique to every high-level operation in the system, so for every traced operation, implementation MUST generate sufficiently large identifier: e.g. GUID, 64-bit or 128-bit random number. -Note that random numbers could be encoded to string to decrease Request-Id length. +Note that random numbers could be encoded to string to decrease Request-Id length. Root Request-Id MUST contain only [Base64](https://en.wikipedia.org/wiki/Base64) and "-" (hyphen) characters. @@ -32,7 +32,7 @@ Same considerations are applied to client applications making HTTP requests and Note that in addition to unique part, it may be useful to include some meaningful information such as host name, device or process id, etc. Implementation is free to do it, keeping root id relatively short. #### Incoming Request -When Request-Id is provided by upstream service, there is no guarantee that it is unique within the entire system. +When Request-Id is provided by upstream service, there is no guarantee that it is unique within the entire system. Implementation SHOULD make it unique by adding small suffix to incoming Request-Id to represent internal activity and use it for outgoing requests. If implementation does not trust incoming Request-Id in the least, suffix may be as long as [Root Request Id](HttpCorrelationProtocol.md#root-request-id-generation). @@ -45,22 +45,22 @@ Implementation MUST append "_" (underscore) to mark the end of generated incomin #### Outgoing Request When making request to downstream service, implementation MUST append small id to the incoming Request-Id and pass a new Request-Id to downstream service. -- Suffix MUST be unique for every outgoing HTTP request sent while processing the incoming request; monotonically incremented number of outgoing request within the scope of this incoming operation, is a good candidate. +- Suffix MUST be unique for every outgoing HTTP request sent while processing the incoming request; monotonically incremented number of outgoing request within the scope of this incoming operation, is a good candidate. - Suffix MUST contain only [Base64](https://en.wikipedia.org/wiki/Base64) and "-" (hyphen) characters Implementation MUST append "." (dot) to mark the end of generated outgoing Request-Id. -It may be useful to split incoming request processing to multiple logical sub-operations and assign different identifiers to them, similarly as it is done for outgoing request, except the sub-operation is processed within the same service. +It may be useful to split incoming request processing to multiple logical sub-operations and assign different identifiers to them, similarly as it is done for outgoing request, except the sub-operation is processed within the same service. #### Request-Id Overflow Extending `Request-Id` may cause it to exceed length limit. -To handle overflow, implementation: +To handle overflow, implementation: * MUST generate suffix that keeps possibility of collision with any of the previous or future Request-Id within the same operation neglectable. * MUST append "#" symbol to suffix to indicate that overflow happened. -* MUST trim end of existing Request-Id to make a room for generated LocalId. 
Implementation MUST trim whole nodes (separated with ".", "_") without preceding delimiter, i.e. it's invalid to trim only part of node.
+* MUST trim end of existing Request-Id to make a room for generated LocalId. Implementation MUST trim whole nodes (separated with ".", "_") without preceding delimiter, i.e. it's invalid to trim only part of node.
 * Suffix MUST contain only [Base64](https://en.wikipedia.org/wiki/Base64) and '-' (hyphen) characters
 
-As a result Request-Id will look like: 
+As a result Request-Id will look like:
 
 `Beginning-Of-Incoming-Request-Id.LocalId#`
 
@@ -74,7 +74,7 @@ Let's consider three services: service-a, service-b and service-c. User calls se
 
 `User -> service-a -> service-b`
 
-1. A: service-a receives request 
+1. A: service-a receives request
 * does not find `Request-Id` and generates a new root Request-Id `|Guid.`
 * trace that incoming request was started along with `Request-Id: |Guid.`
 2. A: service-a makes request to service-b:
diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md b/src/libraries/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md
index 5c2898ef8f2fc..7969c442ebdac 100644
--- a/src/libraries/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md
+++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md
@@ -5,7 +5,7 @@ Starting with System.Diagnostics.DiagnosticSource 4.6.0 (that ships with .Net Co
 [Flat Request-Id](FlatRequestId.md) is **deprecated**.
 
 # Overview
- 
+
 One of the common problems in microservices development is the ability to trace request flow from client (application, browser) through all the services involved in processing.
 Typical scenarios include:
 
@@ -16,7 +16,7 @@ Typical scenarios include:
 These scenarios require every request to carry additional context and services to enrich their telemetry events with this context, so it would be possible to correlate telemetry from all services involved in operation processing.
 
-Tracing an operation involves an overhead on application performance and should always be considered as optional, so application may not trace anything, trace only particular operations or some percent of all operations. 
+Tracing an operation involves an overhead on application performance and should always be considered as optional, so application may not trace anything, trace only particular operations or some percent of all operations.
 Tracing should be consistent: operation should be either fully traced, or not traced at all.
 
 This document provides guidance on the context needed for telemetry correlation and describes its format in HTTP communication. The context is not specific to HTTP protocol, it represents set of identifiers that is needed or helpful for end-to-end tracing. Applications widely use distributed queues for asynchronous processing so operation may start (or continue) from a queue message; applications should propagate the context through the queues and restore (create) it when they start processing received task.
 
@@ -28,15 +28,15 @@ This document provides guidance on the context needed for telemetry correlation
 | Correlation-Context | Optional. Comma separated list of key-value pairs: Id=id, key1=value1, key2=value2 | Operation context which is propagated across all services involved in operation processing |
 
 ## Request-Id
-`Request-Id` uniquely identifies every HTTP request involved in operation processing. 
-Request-Id is generated on the caller side and passed to callee. +Request-Id is generated on the caller side and passed to callee. -Implementation of this protocol should expect to receive `Request-Id` in header of incoming request. +Implementation of this protocol should expect to receive `Request-Id` in header of incoming request. Absence of Request-Id indicates that it is either the first instrumented service in the system or this request was not traced by upstream service and therefore does not have any context associated with it. To start tracing the request, implementation MUST generate new `Request-Id` (see [Root Request Id Generation](#root-request-id-generation)) for the incoming request. -When Request-Id is provided by upstream service, there is no guarantee that it is unique within the entire system. +When Request-Id is provided by upstream service, there is no guarantee that it is unique within the entire system. Implementation SHOULD make it unique by adding small suffix to incoming Request-Id to represent internal activity and use it for outgoing requests, see more details in [Hierarchical Request-Id document](HierarchicalRequestId.md). `Request-Id` is required field, i.e., every instrumented request MUST have it. If implementation does not find `Request-Id` in the incoming request headers, it should consider it as non-traced and MAY not look for `Correlation-Context`. @@ -53,7 +53,7 @@ See [Flat Request-Id](FlatRequestId.md) for non-hierarchical Request-Id requirem ## Correlation-Context First service MAY add state (key value pairs) that will automatically propagate to all other services including intermediary services (that support this protocol). A typical scenarios for the Correlation-Context include logging control and sampling or A/B testing (feature flags) so that the first service has a way to pass this kind of information down to all services (including intermediary). All services other than the first one SHOULD consider Correlation-Context as read-only. -It is important to keep the size of any property small because these get serialized into HTTP headers which have significant size restrictions; Correlation-Context parsing, storing and propagation involves performance overhead on all downstream services. +It is important to keep the size of any property small because these get serialized into HTTP headers which have significant size restrictions; Correlation-Context parsing, storing and propagation involves performance overhead on all downstream services. Correlation-Context MUST NOT be used as generic data passing mechanism between services or within one service. @@ -69,11 +69,11 @@ Implementation MUST provide read access to `Correlation-Context` for logging sys `Correlation-Context: key1=value1, key2=value2` -Keys and values MUST NOT contain "=" (equals) or "," (comma) characters. +Keys and values MUST NOT contain "=" (equals) or "," (comma) characters. Overall Correlation-Context length MUST NOT exceed 1024 bytes, key and value length should stay well under the combined limit of 1024 bytes. -Note that uniqueness of the key within the Correlation-Context is not guaranteed. Context received from upstream service is read-only and implementation MUST NOT remove or aggregate duplicated keys. +Note that uniqueness of the key within the Correlation-Context is not guaranteed. Context received from upstream service is read-only and implementation MUST NOT remove or aggregate duplicated keys. 
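For illustration, here is one way a sender could enforce the constraints above before emitting the header. This is a minimal sketch; the `CorrelationContextHeader` helper and its `Format` method are hypothetical names, not part of the protocol.

```C#
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

static class CorrelationContextHeader
{
    private static readonly char[] Forbidden = { '=', ',' };

    // Formats baggage as a Correlation-Context header value, enforcing the
    // constraints above: no '=' or ',' in keys or values, at most 1024 bytes.
    public static string Format(IEnumerable<KeyValuePair<string, string>> pairs)
    {
        var items = pairs.ToList();
        foreach (KeyValuePair<string, string> pair in items)
        {
            if (pair.Key.IndexOfAny(Forbidden) >= 0 || pair.Value.IndexOfAny(Forbidden) >= 0)
                throw new ArgumentException("Keys and values must not contain '=' or ','.");
        }

        string header = string.Join(", ", items.Select(p => $"{p.Key}={p.Value}"));

        if (Encoding.UTF8.GetByteCount(header) > 1024)
            throw new ArgumentException("Correlation-Context must not exceed 1024 bytes.");

        return header;
    }
}
```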
# HTTP Guidelines and Limitations - [HTTP 1.1 RFC2616](https://tools.ietf.org/html/rfc2616) diff --git a/src/libraries/System.Diagnostics.Tracing/documentation/EventCounterTutorial.md b/src/libraries/System.Diagnostics.Tracing/documentation/EventCounterTutorial.md index f37f206219ee1..b7321f90d452b 100644 --- a/src/libraries/System.Diagnostics.Tracing/documentation/EventCounterTutorial.md +++ b/src/libraries/System.Diagnostics.Tracing/documentation/EventCounterTutorial.md @@ -11,7 +11,7 @@ In the sequel, we assume you are familiar with the basic `EventSource` usage, if Without further ado, here is an example on how to use the `EventCounter` ```c# -// Give your event sources a descriptive name using the EventSourceAttribute, otherwise the name of the class is used. +// Give your event sources a descriptive name using the EventSourceAttribute, otherwise the name of the class is used. [EventSource(Name = "Samples-EventCounterDemos-Minimal")] public sealed class MinimalEventCounterSource : EventSource { @@ -19,7 +19,7 @@ public sealed class MinimalEventCounterSource : EventSource public static MinimalEventCounterSource Log = new MinimalEventCounterSource(); private EventCounter requestCounter; - private MinimalEventCounterSource() : base(EventSourceSettings.EtwSelfDescribingEventFormat) + private MinimalEventCounterSource() : base(EventSourceSettings.EtwSelfDescribingEventFormat) { this.requestCounter = new EventCounter("request", this); } @@ -35,8 +35,8 @@ public sealed class MinimalEventCounterSource : EventSource // 2. Each counter supports a single float value, so conceptually it maps to a single // measurement in the code. // 3. You don't have to have log with WriteEvent if you don't think you will ever care about details - // of individual requests (that counter data is sufficient). - WriteEvent(1, url, elapsedMSec); // This logs it to the event stream if events are on. + // of individual requests (that counter data is sufficient). + WriteEvent(1, url, elapsedMSec); // This logs it to the event stream if events are on. this.requestCounter.WriteMetric(elapsedMSec); // This adds it to the PerfCounter called 'Request' if PerfCounters are on } } @@ -61,7 +61,7 @@ As usual, turn on PerfView, and then run the sample code - we get have something Now let's drill into what the data captured means - when I copied from PerfView, it looks like this ``` -ThreadID="17,800" ProcessorNumber="5" Payload="{ Name:"request", Mean:142.0735, StandardDeviation:42.07355, Count:2, Min:100, Max:184.1471, IntervalSec:1.000588 }" +ThreadID="17,800" ProcessorNumber="5" Payload="{ Name:"request", Mean:142.0735, StandardDeviation:42.07355, Count:2, Min:100, Max:184.1471, IntervalSec:1.000588 }" ``` Now it is obvious that within a sampling period, we have 9 events, and all the other statistics. @@ -84,4 +84,4 @@ In the next relaese of PerfView (> 2.0.26), we can visualize the counters using Then it will show you a line graph showing the mean of the data like this. If you have multiple event counters, it can show multiple plots. You can also filter out a particular subset of counters using the filter text option: - EventCounter graph \ No newline at end of file + EventCounter graph diff --git a/src/libraries/System.Dynamic.Runtime/README.md b/src/libraries/System.Dynamic.Runtime/README.md index 941778be79899..2c47415d92f2a 100644 --- a/src/libraries/System.Dynamic.Runtime/README.md +++ b/src/libraries/System.Dynamic.Runtime/README.md @@ -5,4 +5,4 @@ The library is effectively archived. 
The library and supporting language features are mature and no longer evolving, and the risk of code change likely exceeds the benefit. We will consider changes that address significant bugs or regressions, or changes that are necessary to continue shipping the binaries. -Other changes will be rejected. \ No newline at end of file +Other changes will be rejected. diff --git a/src/libraries/System.Linq.Expressions/README.md b/src/libraries/System.Linq.Expressions/README.md index 582864332a51a..a24bbfc1d53ec 100644 --- a/src/libraries/System.Linq.Expressions/README.md +++ b/src/libraries/System.Linq.Expressions/README.md @@ -5,4 +5,4 @@ The library is effectively archived. The library and supporting language features are mature and no longer evolving, and the risk of code change likely exceeds the benefit. We will consider changes that address significant bugs or regressions, or changes that are necessary to continue shipping the binaries. -Other changes will be rejected. \ No newline at end of file +Other changes will be rejected. diff --git a/src/libraries/System.Net.Http/src/HttpDiagnosticsGuide.md b/src/libraries/System.Net.Http/src/HttpDiagnosticsGuide.md index 7a73dda2ccf99..318d791b0bb79 100644 --- a/src/libraries/System.Net.Http/src/HttpDiagnosticsGuide.md +++ b/src/libraries/System.Net.Http/src/HttpDiagnosticsGuide.md @@ -8,19 +8,19 @@ Applications typically log outgoing HTTP requests. Usually, it's done with `Dele Context is represented as `System.Diagnostics.Activity` class. `Activity` may be started as a child of another `Activity`, and the whole operation is represented with a tree of Activities. You can find more details in the [Activity User Guide](https://github.com/dotnet/runtime/blob/master/src/libraries/System.Diagnostics.DiagnosticSource/src/ActivityUserGuide.md). -`Activity` carries useful properties for logging such as `Id`, `Tags`, `Baggage`, start time and parent information. +`Activity` carries useful properties for logging such as `Id`, `Tags`, `Baggage`, start time and parent information. Instrumentation ensures `Activity.Current` represents current outgoing request in *Write* event callbacks (if the request is instrumented). Consumers **should not** assume `Activity.Current` is accurate in *IsEnabled* callbacks. In a microservice environment, some context should flow with outgoing requests to correlate telemetry from all services involved in processing an operation. -Instrumentation adds context into the request headers: +Instrumentation adds context into the request headers: * *Request-Id* header with `Activity.Id` value * *Correlation-Context* header with `Activity.Baggage` key-value pair list in `k1=v1, k2=v2` format - + See [HTTP Protocol proposal](https://github.com/dotnet/runtime/blob/master/src/libraries/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md) for more details. ## Subscription -Instrumentation is off by default. To enable it, consumer firstly needs to subscribe to a `DiagnosticListener` called *HttpHandlerDiagnosticListener*. +Instrumentation is off by default. To enable it, consumer firstly needs to subscribe to a `DiagnosticListener` called *HttpHandlerDiagnosticListener*. ```C# var subscription = DiagnosticListener.AllListeners.Subscribe(delegate (DiagnosticListener listener) @@ -45,7 +45,7 @@ If there is a consumer, instrumentation calls `DiagnosticListener.IsEnabled("Sys The consumer may optionally provide predicate to `DiagnosticListener` to prevent some requests from being instrumented: e.g. 
if the logging system has HTTP interface, it could be necessary to filter out requests to logging system itself. ```C# - var predicate = (name, r, _) => + var predicate = (name, r, _) => { var request = r as HttpRequestMessage; if (request != null) @@ -64,8 +64,8 @@ If *"System.Net.Http.HttpRequestOut.Start"* event is enabled, instrumentation wr When the request is completed (faulted with an exception, cancelled or successfully completed), instrumentation stops the activity and writes *"System.Net.Http.HttpRequestOut.Stop"* event. Event payload has the following properties: -* **Response** with `HttpResponseMessage` object representing the response, which could be null if the request was failed or cancelled. -* **Request** with `HttpRequestMessage` object representing the request. If the response was received, you can also access it with `HttpResponseMessage.RequestMessage`, but if there was no response, it could be accessed only from the event payload +* **Response** with `HttpResponseMessage` object representing the response, which could be null if the request was failed or cancelled. +* **Request** with `HttpRequestMessage` object representing the request. If the response was received, you can also access it with `HttpResponseMessage.RequestMessage`, but if there was no response, it could be accessed only from the event payload * **RequestTaskStatus** with `TaskStatus` enum value that describes the status of the request's task. This event is sent under the same conditions as *"System.Net.Http.HttpRequestOut.Start"* event. @@ -90,8 +90,8 @@ Otherwise, `Activity.Current` represents some 'parent' activity (presumably inco 7. `DiagnosticListener.Write("System.Net.Http.HttpRequestOut.Stop", new {Response, RequestTaskStatus})` - notifies that activity (outgoing request) is stopping # Non-Activity events (deprecated) -There are two events *"System.Net.Http.Request"* and *"System.Net.Http.Response"*, currently are also emited for compatibility purposes. +There are two events *"System.Net.Http.Request"* and *"System.Net.Http.Response"*, currently are also emited for compatibility purposes. They are redundant with the *"System.Net.Http.HttpRequestOut.Start"* and *"System.Net.Http.HttpRequestOut.Stop"* events (but do not set `Activity.Current` and follow activity conventions - start/stop). They are deprecated, and consumers are advised only to depend on them to support .NET Core V1.1 apps (where the new events are not present). -It is likely that these deprecated events will be removed at some point. +It is likely that these deprecated events will be removed at some point. Consumers should migrate to *"System.Net.Http.HttpRequestOut.Start"* and *"System.Net.Http.HttpRequestOut.Stop"* events instead. 
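For reference, a minimal consumer sketch based on the events described above; the `HttpStopObserver` name is hypothetical. An instance is meant to be passed to `listener.Subscribe(...)` inside the `AllListeners` callback shown earlier, when `listener.Name` is *HttpHandlerDiagnosticListener*.

```C#
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Net.Http;

// Logs every completed outgoing request using the Stop event payload
// documented above (Response, Request, RequestTaskStatus).
sealed class HttpStopObserver : IObserver<KeyValuePair<string, object>>
{
    public void OnNext(KeyValuePair<string, object> evnt)
    {
        if (evnt.Key != "System.Net.Http.HttpRequestOut.Stop")
            return;

        // The payload is an anonymous type, so read Response via reflection.
        var response = evnt.Value?.GetType()
            .GetProperty("Response")?.GetValue(evnt.Value) as HttpResponseMessage;

        // Activity.Current represents the outgoing request in Write callbacks.
        Console.WriteLine(
            $"{Activity.Current?.Id} -> {response?.StatusCode} " +
            $"({Activity.Current?.Duration.TotalMilliseconds} ms)");
    }

    public void OnCompleted() { }
    public void OnError(Exception error) { }
}
```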
diff --git a/src/libraries/System.Net.Http/tests/StressTests/HttpStress/Readme.md b/src/libraries/System.Net.Http/tests/StressTests/HttpStress/Readme.md index 4956b3f129e98..1980b93ea10b5 100644 --- a/src/libraries/System.Net.Http/tests/StressTests/HttpStress/Readme.md +++ b/src/libraries/System.Net.Http/tests/StressTests/HttpStress/Readme.md @@ -40,8 +40,8 @@ This will build libraries and stress suite to a linux docker image and initializ #### Using Windows containers -Before we get started, please see -[docker documentation](https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers) +Before we get started, please see +[docker documentation](https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers) on how windows containers can be enabled on your machine. Once ready, simply run: diff --git a/src/libraries/System.Net.Security/tests/StressTests/SslStress/Readme.md b/src/libraries/System.Net.Security/tests/StressTests/SslStress/Readme.md index 2bfb0b4141c67..06a4b53bace32 100644 --- a/src/libraries/System.Net.Security/tests/StressTests/SslStress/Readme.md +++ b/src/libraries/System.Net.Security/tests/StressTests/SslStress/Readme.md @@ -40,8 +40,8 @@ This will build the libraries and stress suite to a linux docker image and initi #### Using Windows containers -Before we get started, please see -[docker documentation](https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers) +Before we get started, please see +[docker documentation](https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers) on how windows containers can be enabled on your machine. Once ready, simply run: diff --git a/src/libraries/System.Text.Json/docs/ParameterizedCtorSpec.md b/src/libraries/System.Text.Json/docs/ParameterizedCtorSpec.md index 3ac0a9e399573..01830385f5dfb 100644 --- a/src/libraries/System.Text.Json/docs/ParameterizedCtorSpec.md +++ b/src/libraries/System.Text.Json/docs/ParameterizedCtorSpec.md @@ -28,7 +28,7 @@ Also consider `User`: public class User { public string UserName { get; private set; } - + public bool Enabled { get; private set; } public User() { } @@ -129,7 +129,7 @@ Non-`public` support is not provided by `JsonSerializer` by default, so configur The constructor to use can also be specified with a `[SerializationConstructor]` attribute. -`Utf8Json` does not support non-`public` constructors, even with the attribute. +`Utf8Json` does not support non-`public` constructors, even with the attribute. ### `Jil` (.NET) @@ -145,7 +145,7 @@ and proposed in this spec. 
```Java @JsonCreator public BeanWithCreator( - @JsonProperty("id") int id, + @JsonProperty("id") int id, @JsonProperty("theName") String name) { this.id = id; this.name = name; @@ -459,7 +459,7 @@ Given `Person: public class Person { public string FirstName { get; set; } - + public string LastName { get; set; } public Guid Id { get; } @@ -506,7 +506,7 @@ public class PointWrapper public PointWrapper(Point_3D point) {} } - + public struct Point_3D { public int X { get; } @@ -523,7 +523,7 @@ We can ignore `null` tokens and not pass them as arguments to a non-nullable par ```C# JsonSerializerOptions options = new JsonSerializerOptions { - IgnoreNullValues = true + IgnoreNullValues = true }; PointWrapper obj = JsonSerializer.Deserialize(@"{""Point"":null}"); // obj.Point is `default` ``` @@ -546,7 +546,7 @@ We expect most users to have significantly less than 64 parameters, but we can r #### [`ReferenceHandling` semantics](https://github.com/dotnet/runtime/blob/13c1e65a9f7aab201fe77e3daba11946aeb7cbaa/src/libraries/System.Text.Json/docs/ReferenceHandling_spec.md) will not be applied to objects deserialized with parameterized constructors -`NotSupportedException` will be thrown if any properties named "$id", "$ref", or "$values" are found in the payload, and `options.ReferenceHandling` is set to +`NotSupportedException` will be thrown if any properties named "$id", "$ref", or "$values" are found in the payload, and `options.ReferenceHandling` is set to `ReferenceHandling.Preserve`. If the feature is off, these properties will be treated like any other (likely end up in extension data property). This behavior prevents us from breaking people if we implement this feature in the future. diff --git a/src/libraries/System.Text.Json/docs/ReferenceHandling_spec.md b/src/libraries/System.Text.Json/docs/ReferenceHandling_spec.md index 869162cfba4c3..447aa51e10829 100644 --- a/src/libraries/System.Text.Json/docs/ReferenceHandling_spec.md +++ b/src/libraries/System.Text.Json/docs/ReferenceHandling_spec.md @@ -36,7 +36,7 @@ **Preserve duplicated references**: Semantically represent objects and/or arrays that have been previously written, with a reference to them when found again in the object graph (using reference equality for comparison). -**Metadata**: Extra properties on JSON objects and/or arrays (that may change their schema) to enable reference preservation when round-tripping. These additional properties are only meant to be understood by the `JsonSerializer`. +**Metadata**: Extra properties on JSON objects and/or arrays (that may change their schema) to enable reference preservation when round-tripping. These additional properties are only meant to be understood by the `JsonSerializer`. # Motivation @@ -61,7 +61,7 @@ namespace System.Text.Json namespace System.Text.Json.Serialization { /// - /// This class defines the various ways the + /// This class defines the various ways the /// can deal with references on Serialization and Deserialization. /// public sealed class ReferenceHandling @@ -76,13 +76,13 @@ namespace System.Text.Json.Serialization See also the [internal implementation details](https://gist.github.com/Jozkee/b0922ef609f7a942f00ac2c93a976ff1). ## In depth -* **Default**: +* **Default**: * **On Serialize**: Throw a `JsonException` when `MaxDepth` is exceeded. This may occur by either a reference loop or by passing a very deep object. This option will not affect the performance of the serializer. 
* **On Deserialize**: Metadata properties will not be consumed, therefore they will be treated as regular properties that can map to a real property using `JsonPropertyName` or be added to the `JsonExtensionData` overflow dictionary. * **Preserve**: * **On Serialize**: When writing complex types (e.g. POCOs/non-primitive types), the serializer also writes the metadata (`$id`, `$values` and `$ref`) properties in order to reference them later by writing a reference to the previously written JSON object or array. - * **On Deserialize**: While the other options have no effect on deserialization, `Preserve` does affect its behavior, as follows: Metadata will be expected (although is not mandatory) and the deserializer will try to understand it. + * **On Deserialize**: While the other options have no effect on deserialization, `Preserve` does affect its behavior, as follows: Metadata will be expected (although is not mandatory) and the deserializer will try to understand it. * **Ignore**: * **On Serialize**: Ignores (skips writing) the property/element where the reference loop is detected. @@ -104,7 +104,7 @@ The next table show the combination of Newtonsoft's **ReferenceLoopHandling** (R | **Ignore** | *Ignore* | future (overlap) | future (overlap) | future (overlap) | | **Serialize** | future | *Preserve* | future | future | -Notes: +Notes: * We are deferring adding support for Newtonsoft's `MetadataPropertyHandling.ReadAhead` for now. * `Objects` and `Arrays` granularity may apply to both, serialization and deserialization. * (overlap) means that preserve references co-exists along with reference loop handling and we will need to define how to resolve that (On `Newtonsoft.Json`, `PreserveReferencesHandling` takes precedence); see [example](#using-a-custom-referencehandling-to-show-possible-future-usage). @@ -114,8 +114,8 @@ Notes: ## Using Default on Deserialize ```cs -class Employee -{ +class Employee +{ [JsonPropertyName("$id")] public string Identifier { get; set; } public Employee Manager { get; set; } @@ -124,7 +124,7 @@ class Employee public IDictionary ExtensionData { get; set; } } -private const string json = +private const string json = @"{ ""$id"": ""1"", ""Name"": ""Angela"", @@ -152,8 +152,8 @@ Note how you can annotate .Net properties to use properties that are meant for m For the next samples, let's assume you have the following class: ```cs -class Employee -{ +class Employee +{ public string Name { get; set; } public Employee Manager { get; set; } public List Subordinates { get; set; } @@ -171,8 +171,8 @@ bob.Subordinates = new List{ angela }; public static void WriteObject() { string json = JsonSerializer.Serialize(angela, options); - // Throws JsonException - - // "A possible object cycle was detected which is not supported. + // Throws JsonException - + // "A possible object cycle was detected which is not supported. // This can either be due to a cycle or if the object depth is larger than the maximum allowed depth of 64." } ``` @@ -208,7 +208,7 @@ public static void WriteIgnoringReferenceLoops() var settings = new JsonSerializerSettings { ReferenceLoopHandling = ReferenceLoopHandling.Ignore - Formatting = Formatting.Indented + Formatting = Formatting.Indented }; string json = JsonConvert.SerializeObject(angela, settings); @@ -223,7 +223,7 @@ Output: "Manager": { "Name": "Bob", // Note how subordinates is empty because Angela is being ignored. 
- "Subordinates": [] + "Subordinates": [] } } ``` @@ -259,7 +259,7 @@ public static void WritePreservingReference() var settings = new JsonSerializerSettings { PreserveReferencesHandling = PreserveReferencesHandling.All - Formatting = Formatting.Indented + Formatting = Formatting.Indented }; string json = JsonConvert.SerializeObject(angela, settings); @@ -275,9 +275,9 @@ Output: "Manager": { "$id": "2", "Name": "Bob", - "Subordinates": { - // Note how the Subordinates' square braces are replaced with curly braces - // in order to include $id and $values properties, + "Subordinates": { + // Note how the Subordinates' square braces are replaced with curly braces + // in order to include $id and $values properties, // $values will now hold whatever value was meant for the Subordinates list. "$id": "3", "$values": [ @@ -285,14 +285,14 @@ Output: "$ref": "1" } ] - } + } } } ``` ## Using Preserve on Deserialize ```cs -private const string json = +private const string json = @"{ ""$id"": ""1"", ""Name"": ""Angela"", @@ -302,11 +302,11 @@ private const string json = ""Subordinates"": { ""$id"": ""3"", ""$values"": [ - { - ""$ref"": ""1"" + { + ""$ref"": ""1"" } ] - } + } } }"; ``` @@ -329,7 +329,7 @@ public static void ReadJsonWithPreservedReferences(){ var options = new JsonSerializerSettings { //Newtonsoft.Json reads metadata by default, just setting the option for illustrative purposes. - MetadataPropertyHanding = MetadataPropertyHandling.Default + MetadataPropertyHanding = MetadataPropertyHandling.Default }; Employee angela = JsonConvert.DeserializeObject(json, settings); @@ -349,7 +349,7 @@ public static void ReadJsonWithPreservedReferences(){ When using `ReferenceLoopHandling.Ignore`, other objects that were already seen on the current graph branch will be ignored on serialization. -When using `PreserveReferencesHandling.All` you are signaling that your resulting JSON will contain *metadata* properties `$ref`, `$id` and `$values` which are going to act as reference identifiers (`$id`) and pointers (`$ref`). +When using `PreserveReferencesHandling.All` you are signaling that your resulting JSON will contain *metadata* properties `$ref`, `$id` and `$values` which are going to act as reference identifiers (`$id`) and pointers (`$ref`). Now, to read back those references, you have to use `MetadataPropertyHandling.Default` to indicate that *metadata* is expected in the payload passed to the `Deserialize` method. * Pros @@ -364,21 +364,21 @@ Now, to read back those references, you have to use `MetadataPropertyHandling.De ## dojo toolkit (JavaScript framework) https://dojotoolkit.org/reference-guide/1.10/dojox/json/ref.html -Similar: https://www.npmjs.com/package/json-cyclic +Similar: https://www.npmjs.com/package/json-cyclic * id-based (ignore this approach since it is the same as `Newtonsoft.Json`) * path-based * "\#" denotes the root of the object and then uses semantics inspired by JSONPath. * It does not uses `$id` nor `$values` metadata, therefore, everything can be referenced. * Pros - * It looks cleaner. + * It looks cleaner. * Only disruptive (weird) edge case would be a reference to an array e.g: { "MyArray": { "$ref": "#manager.subordinates" } }. * Cons * Path value will become too long on very deep objects. * Storing all the complex types could become very expensive, are we going to store also primitive types? * This would break existing converters when handling reference to an array. * Not compatible with `Newtonsoft.Json`. 
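To make the path-based scheme concrete, here is a hypothetical sketch of how a reader could resolve a dojo-style reference such as `#manager.subordinates` with `JsonDocument`. It handles property segments only (JSONPath-style array indexing is omitted), and it also illustrates the con above: the full path must be walked from the root, so paths grow long on deep objects.

```cs
using System;
using System.Text.Json;

static class PathReferenceResolver
{
    // "#" denotes the document root; the remainder is treated here as a
    // dot-separated chain of property names.
    public static JsonElement Resolve(JsonDocument doc, string path)
    {
        JsonElement current = doc.RootElement;
        foreach (string segment in path.TrimStart('#')
                     .Split('.', StringSplitOptions.RemoveEmptyEntries))
        {
            current = current.GetProperty(segment);
        }
        return current;
    }
}
```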
- + ## flatted (JavaScript module) (probably not worth it) https://github.com/WebReflection/flatted @@ -395,7 +395,7 @@ https://www.baeldung.com/jackson-bidirectional-relationships-and-infinite-recurs * Let you annotate your class with `@JsonIdentityInfo` where you can define a class property that will be used to further represent the object. -## golang +## golang * Circularity detection will start to occur after a fixed threshold of 1,000 depth. * [This fix](https://go-review.googlesource.com/c/go/+/187920/) is about detecting circular references after a threshold of 1,000 and throw when found in order to prevent a non-recoverable stack overflow. @@ -430,11 +430,11 @@ As a rule of thumb, we throw on all cases where the JSON payload being read cont "Name": "Angela", "Manager":{ "$ref": "1", - "Name": "Angela" + "Name": "Angela" } } ``` - + * Metadata property **before** `$ref`: * **Newtonsoft.Json**: `$id` is disregarded, and the reference is set. * **S.T.Json**: Throw - Reference objects cannot contain other properties. @@ -462,7 +462,7 @@ As a rule of thumb, we throw on all cases where the JSON payload being read cont } } ``` - + * Reference object is before preserved object (or preserved object was never spotted): * **Newtonsoft.Json**: Reference object evaluates as `null`. * **S.T.Json**: Throw - Reference not found. @@ -482,7 +482,7 @@ As a rule of thumb, we throw on all cases where the JSON payload being read cont * Having more than one `$id` in the same object: * **Newtonsoft.Json**: last one wins, in the example, the reference object evaluates to `null` (if `$ref` would be `"2"`, it would evaluate to itself). - * **S.T.Json**: Throw - $id must be the first property. + * **S.T.Json**: Throw - $id must be the first property. ```json { "$id": "1", @@ -621,7 +621,7 @@ A preserved array is written in the next format `{ "$id": "1", "$values": [ elem Note: For Dictionary keys on serialize, should we allow serializing keys `$id`, `$ref` and `$values`? If we allow it, then there is a potential round-tripping issue. Sample of similar issue with `DictionaryKeyPolicy`: ```cs -public static void TestDictionary_Collision() +public static void TestDictionary_Collision() { var root = new Dictionary(); root["helloWorld"] = 100; @@ -642,7 +642,7 @@ public static void TestDictionary_Collision() } ``` -Resolution for above issue: +Resolution for above issue: On serialization, when a JSON property name, that is either a dictionary key or a CLR class property, starts with a '$' character, we must write the escaped character "\u0024" instead. On deserialization, metadata will be digested by using only the raw bytes, so no encoded characters are allowed in metadata; to read JSON properties that start with a '$' you will need to pass it with the escaped '$' (\u0024) or turn the feature off. @@ -675,7 +675,7 @@ Note 2: When using immutable types and `ReferenceHandling.Preserve`, you will no ## Value types -* **Serialization**: +* **Serialization**: The serializer emits an `$id` for every JSON complex type. However, to reduce bandwidth, structs will not be written with metadata, since it would be meaningless due `ReferenceEquals` is used when comparing the objects and no backpointer reference would be ever written to an struct. 
```cs @@ -686,14 +686,14 @@ public static void SerializeStructs() Name = "Angela" }; - List employees = new List - { - angela, - angela + List employees = new List + { + angela, + angela }; - var options = new JsonSerializerOptions - { + var options = new JsonSerializerOptions + { ReferenceHandling = ReferenceHandling.Preserve }; @@ -717,7 +717,7 @@ Output: ``` * **Deserialization**: -The deserializer will throw when it reads `$ref` within a property that matches to a value type (such as a struct) and `ReferenceHandling.Preserve` is set. +The deserializer will throw when it reads `$ref` within a property that matches to a value type (such as a struct) and `ReferenceHandling.Preserve` is set. Example: ```cs @@ -737,8 +737,8 @@ public static void DeserializeStructs() ] }"; - var options = new JsonSerializerOptions - { + var options = new JsonSerializerOptions + { ReferenceHandling = ReferenceHandling.Preserve }; @@ -806,12 +806,12 @@ Things that we may want to consider building on top based on customer feedback: ```cs // Example of a class annotated with JsonReferenceHandling attributes. [JsonReferenceHandling(ReferenceHandling.Preserve)] -public class Employee { +public class Employee { public string Name { get; set; } [JsonReferenceHandling(ReferenceHandling.Ignore)] public Employee Manager { get; set; } - + public List Subordinates { get; set; } } ``` @@ -825,7 +825,7 @@ public static void WriteIgnoringReferenceLoopsAndReadPreservedReferences() angela.Manager = bob; bob.Subordinates = new List{ angela }; - + var allEmployees = new List { angela, @@ -861,7 +861,7 @@ public static void WriteIgnoringReferenceLoopsAndReadPreservedReferences() } }, { - // Note how element 2 is written as a reference + // Note how element 2 is written as a reference // since was previously seen in allEmployees[0].Manager "$ref": "2" } @@ -869,7 +869,7 @@ public static void WriteIgnoringReferenceLoopsAndReadPreservedReferences() */ allEmployees = JsonSerializer.Deserialize>(json, options); - Console.WriteLine(allEmployees[0].Manager == allEmployees[1]); + Console.WriteLine(allEmployees[0].Manager == allEmployees[1]); /* Output: true */ } ``` @@ -882,4 +882,4 @@ public static void WriteIgnoringReferenceLoopsAndReadPreservedReferences() 4. Value types, such as structs that contain preserve semantics, will not be supported when deserializing as well. This is because the serializer will never emit a reference object to those types and doing so implies boxing of value types. 5. Additional features, such as converter support, `ReferenceResolver`, `JsonPropertyAttribute.IsReference` and `JsonPropertyAttribute.ReferenceLoopHandling`, that build on top of `ReferenceLoopHandling` and `PreserveReferencesHandling` were considered but they can be added in the future based on customer requests. 6. We are still looking for evidence that backs up supporting `ReferenceHandling.Ignore`. This option will not ship if said evidence is not found. -7. Round-tripping support for preserved references into the `JsonExtensionData` is currently not supported (we emit the metadata on serialization and we create a JsonElement on deserialization instead), while in `Newtonsoft.Json` they are supported. This may change in a future based on customer feedback. \ No newline at end of file +7. Round-tripping support for preserved references into the `JsonExtensionData` is currently not supported (we emit the metadata on serialization and we create a JsonElement on deserialization instead), while in `Newtonsoft.Json` they are supported. 
This may change in a future based on customer feedback. diff --git a/src/libraries/System.Text.Json/docs/SerializerProgrammingModel.md b/src/libraries/System.Text.Json/docs/SerializerProgrammingModel.md index 503fbc3014f3f..1e85017173629 100644 --- a/src/libraries/System.Text.Json/docs/SerializerProgrammingModel.md +++ b/src/libraries/System.Text.Json/docs/SerializerProgrammingModel.md @@ -137,7 +137,7 @@ namespace System.Text.Json.Serialization protected JsonNamingPolicy() { } public abstract string ConvertName(string name); - } + } } ``` diff --git a/src/libraries/System.Text.Json/docs/writable_json_dom_spec.md b/src/libraries/System.Text.Json/docs/writable_json_dom_spec.md index 487c721d98d28..bdbc240a47659 100644 --- a/src/libraries/System.Text.Json/docs/writable_json_dom_spec.md +++ b/src/libraries/System.Text.Json/docs/writable_json_dom_spec.md @@ -17,7 +17,7 @@ It is a summer internship project being developed by @kasiabulat. ## Goals The user should be able to: -* Build up a structured in-memory representation of the JSON payload. +* Build up a structured in-memory representation of the JSON payload. * Query the document object model. * Modify it. That includes, remove, add, and update. This means we want to build a modifiable JsonDocument analogue that is not just readonly. @@ -43,7 +43,7 @@ var developer = new JsonObject }; ``` -JSON object can be nested within other JSON objects or include a JSON array: +JSON object can be nested within other JSON objects or include a JSON array: ```csharp var person = new JsonObject @@ -78,7 +78,7 @@ var person = new JsonObject "phone numbers", new JsonArray() { "123-456-7890", - "123-456-7890" + "123-456-7890" } } }; @@ -102,7 +102,7 @@ var preferences = new JsonObject() ### Modifying existing instance -The main goal of the new API is to allow users to modify existing instance of `JsonNode` which is not possible with `JsonElement` and `JsonDocument`. +The main goal of the new API is to allow users to modify existing instance of `JsonNode` which is not possible with `JsonElement` and `JsonDocument`. One may change the existing property to have a different value: ```csharp @@ -162,17 +162,17 @@ If a developer knows they will be modifying an instance, there is an API to pars ```csharp string jsonString = @" { - ""employee1"" : + ""employee1"" : { ""name"" : ""Ann"", ""surname"" : ""Predictable"", - ""age"" : 30, + ""age"" : 30, }, - ""employee2"" : + ""employee2"" : { ""name"" : ""Zoe"", ""surname"" : ""Coder"", - ""age"" : 24, + ""age"" : 24, } }"; @@ -194,7 +194,7 @@ Mailbox.SendAllEmployeesData(employees.AsJsonElement()); * `JsonNull` class instead of `null` reference to node. * No additional overloads of Add methods for primary types (bool, string, int, double, long...) for `JsonObject` and `JsonArray`. Instead - implicit cast operators in JsonNode. * `Sort` not implemented for `JsonArray`, beacuse there is no right way to compare `JsonObjects`. If a user wants to sort a `JsonArray` of `JsonNumbers`, `JsonBooleans` or `JsonStrings` they now needs to do the following: convert the `JsonArray` to a regular array (by iterating through all elements), call sort (and convert back to `JsonArray` if needed). -* Property names duplicates handling method possible to choose during parsing to `JsonNode`. When creating `JsonObject` Add method throws an exception for duplicates and indexer replaces old property value with new one. +* Property names duplicates handling method possible to choose during parsing to `JsonNode`. 
When creating `JsonObject` Add method throws an exception for duplicates and indexer replaces old property value with new one. * No support for escaped characters when creating `JsonNumber` from string. * `JsonValueKind` property that a caller can inspect and cast to the right concrete type * Transformation API: @@ -208,7 +208,7 @@ Mailbox.SendAllEmployeesData(employees.AsJsonElement()); * `ToJsonString` method transforming JsonNode to string representation using WriteTo. * No recursive equals for `JsonArray` and `JsonObject`. * `JsonNode` derived types do not implement `IComparable`. -* `JsonObject` does not implement `IDictionary`, but `JsonArray` implements `IList`. +* `JsonObject` does not implement `IDictionary`, but `JsonArray` implements `IList`. * We support order preservation when adding/removing values in `JsonArray`/`JsonObject`. * We do not support creating `JsonNumber` from `BigInterger` without changing it to string. * `ToString` returns: @@ -218,7 +218,7 @@ Mailbox.SendAllEmployeesData(employees.AsJsonElement()); * Is not overloaded for `JsonArray` and `JsonObject`. ## Open questions -* Do we want `JsonArray` to support `Contains`, `IndexOf` and `LastIndexOf` if we keep reference equality for `JsonArray`/`JsonObject` and don't have a good way of comparing numbers? +* Do we want `JsonArray` to support `Contains`, `IndexOf` and `LastIndexOf` if we keep reference equality for `JsonArray`/`JsonObject` and don't have a good way of comparing numbers? * Should nodes track their own position in the JSON graph? Do we want to allow properties like Parent, Next and Previous? | Solution | Pros | Cons | @@ -228,18 +228,18 @@ Mailbox.SendAllEmployeesData(employees.AsJsonElement()); * Do we want to change JsonNumber's backing field to something different than string? - Suggestions: - - `Span` or array of `Utf8String`/`Char8` (once they come online in the future) / `byte` - - Internal types that are specific to each numeric type in .NET with factories to create JsonNumber + Suggestions: + - `Span` or array of `Utf8String`/`Char8` (once they come online in the future) / `byte` + - Internal types that are specific to each numeric type in .NET with factories to create JsonNumber - Internal struct field which has all the supported numeric types - Unsigned long field accompanying string to store types that are <= 8 bytes long -* Should we add overloads for all nullable types as well? For example: - ```csharp +* Should we add overloads for all nullable types as well? For example: + ```csharp public static implicit operator System.Text.Json.JsonNode (bool? value) { throw null; } ``` -* Do we want to have implicit cast operators on `JsonNull`, `JsonBoolean`, `JsonString` and `JsonNumber` while we already have them in `JsonNode`? It would be consistent, but implicit cast from e.g. float.Infinity to `JsonNumber` would throw an exception, because we would not be able return `JsonString` in this case anymore. +* Do we want to have implicit cast operators on `JsonNull`, `JsonBoolean`, `JsonString` and `JsonNumber` while we already have them in `JsonNode`? It would be consistent, but implicit cast from e.g. float.Infinity to `JsonNumber` would throw an exception, because we would not be able return `JsonString` in this case anymore. 
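To ground that last open question, a small illustrative sketch of the behavior described above, where the `JsonNode`-level casts can fall back to `JsonString` for non-finite floats, which casts defined on `JsonNumber` itself could not:

```csharp
JsonNode flag = true;                   // -> JsonBoolean via implicit cast
JsonNode size = 14;                     // -> JsonNumber
JsonNode theme = "solarized";           // -> JsonString
JsonNode inf = float.PositiveInfinity;  // -> JsonString, since JsonNumber
                                        //    cannot represent Infinity
```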
## Useful links diff --git a/src/libraries/System.Text.Json/roadmap/README.md b/src/libraries/System.Text.Json/roadmap/README.md index b4283ce52cc4a..de42f9934428e 100644 --- a/src/libraries/System.Text.Json/roadmap/README.md +++ b/src/libraries/System.Text.Json/roadmap/README.md @@ -5,7 +5,7 @@ * Our goal is to provide high-performance JSON APIs built into .NET Runtime. See the [announcement](https://github.com/dotnet/announcements/issues/90) for more details. - For the first version of the library, our objective is to remove the - dependency on JSON.NET within the shared framework. + dependency on JSON.NET within the shared framework. - The primary focus will be on core functionality with emphasis on performance over capabilities and additional features. @@ -51,7 +51,7 @@ ### JsonDocument * Provides the ability to parse and represent a JSON document in-memory from - UTF-8 encoded JSON text. This will enable the caller to query the JSON + UTF-8 encoded JSON text. This will enable the caller to query the JSON Document Object Model (DOM), i.e. random access of various nodes. - Ability to modify the DOM once it has been generated would be limited to slicing (i.e. only removing nodes would be supported) diff --git a/src/libraries/System.Text.Json/source_package/README.md b/src/libraries/System.Text.Json/source_package/README.md index bf48e9484de74..3673766c2a84b 100644 --- a/src/libraries/System.Text.Json/source_package/README.md +++ b/src/libraries/System.Text.Json/source_package/README.md @@ -1,3 +1,3 @@ # System.Text.Json Source Package -We are no longer producing the source package, and instead are shipping a System.Text.Json NuGet package which is compatible with netstandard2.0. Please visit [NuGet](https://www.nuget.org/packages/System.Text.Json/) in order to download it. \ No newline at end of file +We are no longer producing the source package, and instead are shipping a System.Text.Json NuGet package which is compatible with netstandard2.0. Please visit [NuGet](https://www.nuget.org/packages/System.Text.Json/) in order to download it. diff --git a/src/libraries/System.Utf8String.Experimental/README.md b/src/libraries/System.Utf8String.Experimental/README.md index 94c577f6a0759..c4e05f915d6d8 100644 --- a/src/libraries/System.Utf8String.Experimental/README.md +++ b/src/libraries/System.Utf8String.Experimental/README.md @@ -2,7 +2,7 @@ The `Utf8String` and `Char8` types are now available for experimentation. They c To install: -```ps +```ps install-package System.Utf8String.Experimental -prerelease -source https://dotnetfeed.blob.core.windows.net/dotnet-core/index.json ``` diff --git a/src/mono/mono/mini/cpu-amd64.md b/src/mono/mono/mini/cpu-amd64.md index 06abb8e7e0d72..e581a983944f7 100644 --- a/src/mono/mono/mini/cpu-amd64.md +++ b/src/mono/mono/mini/cpu-amd64.md @@ -24,9 +24,9 @@ # # len:number describe the maximun length in bytes of the instruction # number is a positive integer. If the length is not specified -# it defaults to zero. But lengths are only checked if the given opcode -# is encountered during compilation. Some opcodes, like CONV_U4 are -# transformed into other opcodes in the brg files, so they do not show up +# it defaults to zero. But lengths are only checked if the given opcode +# is encountered during compilation. Some opcodes, like CONV_U4 are +# transformed into other opcodes in the brg files, so they do not show up # during code generation. 
# # cost:number describe how many cycles are needed to complete the instruction (unused) @@ -270,7 +270,7 @@ float_conv_to_u1: dest:i src1:f len:49 float_conv_to_i: dest:i src1:f len:49 float_conv_to_ovf_i: dest:a src1:f len:40 float_conv_to_ovd_u: dest:a src1:f len:40 -float_mul_ovf: +float_mul_ovf: float_ceq: dest:i src1:f src2:f len:35 float_cgt: dest:i src1:f src2:f len:35 float_cgt_un: dest:i src1:f src2:f len:48 @@ -711,11 +711,11 @@ unpack_highq: dest:x src1:x src2:x len:5 clob:1 unpack_highps: dest:x src1:x src2:x len:5 clob:1 unpack_highpd: dest:x src1:x src2:x len:5 clob:1 -packw: dest:x src1:x src2:x len:5 clob:1 -packd: dest:x src1:x src2:x len:5 clob:1 +packw: dest:x src1:x src2:x len:5 clob:1 +packd: dest:x src1:x src2:x len:5 clob:1 -packw_un: dest:x src1:x src2:x len:5 clob:1 -packd_un: dest:x src1:x src2:x len:6 clob:1 +packw_un: dest:x src1:x src2:x len:5 clob:1 +packd_un: dest:x src1:x src2:x len:6 clob:1 paddb_sat: dest:x src1:x src2:x len:5 clob:1 paddb_sat_un: dest:x src1:x src2:x len:5 clob:1 @@ -782,7 +782,7 @@ extract_i2: dest:i src1:x len:13 extract_u2: dest:i src1:x len:13 extract_i1: dest:i src1:x len:13 extract_u1: dest:i src1:x len:13 -extract_r8: dest:f src1:x len:5 +extract_r8: dest:f src1:x len:5 iconv_to_r4_raw: dest:f src1:i len:10 @@ -804,7 +804,7 @@ loadx_aligned_membase: dest:x src1:b len:7 storex_aligned_membase_reg: dest:b src1:x len:7 storex_nta_membase_reg: dest:b src1:x len:7 -fconv_to_r8_x: dest:x src1:f len:4 +fconv_to_r8_x: dest:x src1:f len:4 xconv_r8_to_i4: dest:y src1:x len:7 prefetch_membase: src1:b len:4 diff --git a/src/mono/mono/mini/cpu-arm.md b/src/mono/mono/mini/cpu-arm.md index f3b6641d3f53d..a58d4bb0e29bd 100644 --- a/src/mono/mono/mini/cpu-arm.md +++ b/src/mono/mono/mini/cpu-arm.md @@ -130,8 +130,8 @@ storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 -storei8_membase_imm: dest:b -storei8_membase_reg: dest:b src1:i +storei8_membase_imm: dest:b +storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:60 storer8_membase_reg: dest:b src1:f len:24 store_memindex: dest:b src1:i src2:i len:4 diff --git a/src/mono/mono/mini/cpu-mips.md b/src/mono/mono/mini/cpu-mips.md index cbe7788a6b0f0..96920a0747b52 100644 --- a/src/mono/mono/mini/cpu-mips.md +++ b/src/mono/mono/mini/cpu-mips.md @@ -118,7 +118,7 @@ storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:20 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 -storei8_membase_imm: dest:b +storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i len:20 storer4_membase_reg: dest:b src1:f len:20 storer8_membase_reg: dest:b src1:f len:20 @@ -156,7 +156,7 @@ add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:20 # there is no actual support for division or reminder by immediate -# we simulate them, though (but we need to change the burg rules +# we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 @@ -346,7 +346,7 @@ long_xor_imm: dest:i src1:i clob:1 len:4 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:12 -long_conv_to_r_un: dest:f src1:i src2:i len:37 +long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:16 float_bne_un: len:16 diff --git a/src/mono/mono/mini/cpu-ppc.md b/src/mono/mono/mini/cpu-ppc.md index cb31d18c908cf..6ef33f2887220 100644 --- 
a/src/mono/mono/mini/cpu-ppc.md +++ b/src/mono/mono/mini/cpu-ppc.md @@ -137,7 +137,7 @@ add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate -# we simulate them, though (but we need to change the burg rules +# we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 @@ -164,8 +164,8 @@ cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 -long_mul_ovf: -long_conv_to_r_un: dest:f src1:i src2:i len:37 +long_mul_ovf: +long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:8 float_bne_un: len:8 float_blt: len:8 diff --git a/src/mono/mono/mini/cpu-ppc64.md b/src/mono/mono/mini/cpu-ppc64.md index f0651f5f8d05a..fc0e634baa460 100644 --- a/src/mono/mono/mini/cpu-ppc64.md +++ b/src/mono/mono/mini/cpu-ppc64.md @@ -141,7 +141,7 @@ add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate -# we simulate them, though (but we need to change the burg rules +# we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 diff --git a/src/mono/mono/mini/cpu-s390x.md b/src/mono/mono/mini/cpu-s390x.md index 34295c32bde99..5db404947639b 100644 --- a/src/mono/mono/mini/cpu-s390x.md +++ b/src/mono/mono/mini/cpu-s390x.md @@ -229,7 +229,7 @@ storei2_membase_reg: dest:b src1:i len:26 storei4_membase_imm: dest:b len:46 storei4_membase_reg: dest:b src1:i len:26 storei8_membase_imm: dest:b len:46 -storei8_membase_reg: dest:b src1:i len:26 +storei8_membase_reg: dest:b src1:i len:26 storer4_membase_reg: dest:b src1:f len:28 storer8_membase_reg: dest:b src1:f len:24 sub_imm: dest:i src1:i len:18 @@ -332,8 +332,8 @@ long_div: dest:i src1:i src2:i len:12 long_div_un: dest:i src1:i src2:i len:16 long_mul: dest:i src1:i src2:i len:12 long_mul_imm: dest:i src1:i len:20 -long_mul_ovf: dest:i src1:i src2:i len:56 -long_mul_ovf_un: dest:i src1:i src2:i len:64 +long_mul_ovf: dest:i src1:i src2:i len:56 +long_mul_ovf_un: dest:i src1:i src2:i len:64 long_and: dest:i src1:i src2:i len:8 long_or: dest:i src1:i src2:i len:8 long_xor: dest:i src1:i src2:i len:8 @@ -368,7 +368,7 @@ long_conv_to_u2: dest:i src1:i len:24 long_conv_to_u4: dest:i src1:i len:4 long_conv_to_u8: dest:i src1:i len:4 long_conv_to_u: dest:i src1:i len:4 -long_conv_to_r_un: dest:f src1:i len:37 +long_conv_to_r_un: dest:f src1:i len:37 long_beq: len:8 long_bge_un: len:8 @@ -399,7 +399,7 @@ int_conv_to_i: dest:i src1:i len:4 int_conv_to_u1: dest:i src1:i len:10 int_conv_to_u2: dest:i src1:i len:16 int_conv_to_u4: dest:i src1:i len:4 -int_conv_to_r_un: dest:f src1:i len:37 +int_conv_to_r_un: dest:f src1:i len:37 cond_exc_ic: len:8 cond_exc_ieq: len:8 @@ -431,9 +431,9 @@ vcall2_membase: src1:b len:12 clob:c vcall2_reg: src1:i len:8 clob:c s390_int_add_ovf: len:32 dest:i src1:i src2:i -s390_int_add_ovf_un: len:32 dest:i src1:i src2:i +s390_int_add_ovf_un: len:32 dest:i src1:i src2:i s390_int_sub_ovf: len:32 dest:i src1:i src2:i -s390_int_sub_ovf_un: len:32 dest:i src1:i src2:i +s390_int_sub_ovf_un: len:32 dest:i src1:i src2:i s390_long_add_ovf: dest:i src1:i src2:i len:32 s390_long_add_ovf_un: dest:i src1:i src2:i len:32 diff --git a/src/mono/mono/mini/cpu-sparc.md 
b/src/mono/mono/mini/cpu-sparc.md index 341b11e5b348f..403a73c380ee8 100644 --- a/src/mono/mono/mini/cpu-sparc.md +++ b/src/mono/mono/mini/cpu-sparc.md @@ -263,8 +263,8 @@ long_shl: dest:i src1:i src2:i len:64 long_shr: dest:i src1:i src2:i len:64 long_shr_un: dest:i src1:i src2:i len:64 long_conv_to_ovf_i: dest:i src1:i src2:i len:48 -long_mul_ovf: -long_conv_to_r_un: dest:f src1:i src2:i len:64 +long_mul_ovf: +long_conv_to_r_un: dest:f src1:i src2:i len:64 long_shr_imm: dest:i src1:i len:64 long_shr_un_imm: dest:i src1:i len:64 long_shl_imm: dest:i src1:i len:64 diff --git a/src/mono/mono/mini/cpu-x86.md b/src/mono/mono/mini/cpu-x86.md index 85e3ea7cf4ea1..a0f0fc14f15dd 100644 --- a/src/mono/mono/mini/cpu-x86.md +++ b/src/mono/mono/mini/cpu-x86.md @@ -25,9 +25,9 @@ # # len:number describe the maximun length in bytes of the instruction # number is a positive integer. If the length is not specified -# it defaults to zero. But lengths are only checked if the given opcode -# is encountered during compilation. Some opcodes, like CONV_U4 are -# transformed into other opcodes in the brg files, so they do not show up +# it defaults to zero. But lengths are only checked if the given opcode +# is encountered during compilation. Some opcodes, like CONV_U4 are +# transformed into other opcodes in the brg files, so they do not show up # during code generation. # # cost:number describe how many cycles are needed to complete the instruction (unused) @@ -180,8 +180,8 @@ storei2_membase_imm: dest:b len:11 storei2_membase_reg: dest:b src1:i len:7 storei4_membase_imm: dest:b len:10 storei4_membase_reg: dest:b src1:i len:7 -storei8_membase_imm: dest:b -storei8_membase_reg: dest:b src1:i +storei8_membase_imm: dest:b +storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:7 storer8_membase_reg: dest:b src1:f len:7 load_membase: dest:i src1:b len:7 @@ -257,7 +257,7 @@ float_conv_to_u1: dest:y src1:f len:39 float_conv_to_i: dest:i src1:f len:39 float_conv_to_ovf_i: dest:a src1:f len:30 float_conv_to_ovd_u: dest:a src1:f len:30 -float_mul_ovf: +float_mul_ovf: float_ceq: dest:y src1:f src2:f len:25 float_cgt: dest:y src1:f src2:f len:25 float_cgt_un: dest:y src1:f src2:f len:37 @@ -425,7 +425,7 @@ cmov_ile_un: dest:i src1:i src2:i len:16 clob:1 cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30 -long_conv_to_r8_2: dest:f src1:i src2:i len:14 +long_conv_to_r8_2: dest:f src1:i src2:i len:14 long_conv_to_r4_2: dest:f src1:i src2:i len:14 long_conv_to_r_un_2: dest:f src1:i src2:i len:40 @@ -565,11 +565,11 @@ unpack_highq: dest:x src1:x src2:x len:4 clob:1 unpack_highps: dest:x src1:x src2:x len:3 clob:1 unpack_highpd: dest:x src1:x src2:x len:4 clob:1 -packw: dest:x src1:x src2:x len:4 clob:1 -packd: dest:x src1:x src2:x len:4 clob:1 +packw: dest:x src1:x src2:x len:4 clob:1 +packd: dest:x src1:x src2:x len:4 clob:1 -packw_un: dest:x src1:x src2:x len:4 clob:1 -packd_un: dest:x src1:x src2:x len:5 clob:1 +packw_un: dest:x src1:x src2:x len:4 clob:1 +packd_un: dest:x src1:x src2:x len:5 clob:1 paddb_sat: dest:x src1:x src2:x len:4 clob:1 paddb_sat_un: dest:x src1:x src2:x len:4 clob:1 @@ -634,7 +634,7 @@ extract_i2: dest:i src1:x len:10 extract_u2: dest:i src1:x len:10 extract_i1: dest:i src1:x len:10 extract_u1: dest:i src1:x len:10 -extract_r8: dest:f src1:x len:8 +extract_r8: dest:f src1:x len:8 insert_i2: dest:x src1:x src2:i len:5 clob:1 @@ -653,7 +653,7 @@ loadx_aligned_membase: dest:x src1:b len:7 storex_aligned_membase_reg: dest:b src1:x len:7 
storex_nta_membase_reg: dest:b src1:x len:7 -fconv_to_r8_x: dest:x src1:f len:14 +fconv_to_r8_x: dest:x src1:f len:14 xconv_r8_to_i4: dest:y src1:x len:7 prefetch_membase: src1:b len:4 diff --git a/src/mono/mono/tests/metadata-verifier/cli-blob-tests.md b/src/mono/mono/tests/metadata-verifier/cli-blob-tests.md index 89c2e87ce1c7d..b93426a47ff82 100644 --- a/src/mono/mono/tests/metadata-verifier/cli-blob-tests.md +++ b/src/mono/mono/tests/metadata-verifier/cli-blob-tests.md @@ -20,7 +20,7 @@ method-def-sig { invalid offset blob.i (table-row (6 0) + 10) + 1 set-byte 0x2E invalid offset blob.i (table-row (6 0) + 10) + 1 set-byte 0x2F - #upper nimble flags 0x80 is invalid + #upper nimble flags 0x80 is invalid invalid offset blob.i (table-row (6 0) + 10) + 1 set-bit 7 #sig is too small to decode param count @@ -89,7 +89,7 @@ method-def-ret-misc { method-ref-sig { assembly assembly-with-signatures.exe - #member ref 0 is has a vararg sig + #member ref 0 is has a vararg sig #member ref 1 don't use vararg #2 sentinels @@ -156,23 +156,23 @@ locals-sig { #bad local sig #row 0 has tons of locals - #row 1 is int32&, int32 + #row 1 is int32&, int32 #row 2 is typedref #typedref with byref - #row 1 is: cconv pcount(2) byref int32 int32 + #row 1 is: cconv pcount(2) byref int32 int32 #row 1 goes to: cconv pcount(2) byref typedbyref int32 invalid offset blob.i (table-row (0x11 1)) + 4 set-byte 0x16 #byref pinned int32 - #row 1 is: cconv pcount(2) byref int32 int32 + #row 1 is: cconv pcount(2) byref int32 int32 #row 1 goes to: cconv pcount(1) byref pinned int32 invalid offset blob.i (table-row (0x11 1)) + 2 set-byte 0x01, offset blob.i (table-row (0x11 1)) + 4 set-byte 0x45 #pinned pinned int32 - #row 1 is: cconv pcount(2) byref int32 int32 + #row 1 is: cconv pcount(2) byref int32 int32 #row 1 goes to: cconv pcount(1) pinned pinned int32 #LAMEIMPL MS doesn't care about this valid offset blob.i (table-row (0x11 1)) + 2 set-byte 0x01, @@ -215,7 +215,7 @@ type-enc { invalid offset blob.i (table-row (0x04 3) + 4) + 3 set-byte 0x16 #LAMEIMPL MS verifier doesn't catch this one (runtime does) - #rank 0 + #rank 0 invalid offset blob.i (table-row (0x04 3) + 4) + 4 set-byte 0x00 #large nsizes invalid offset blob.i (table-row (0x04 3) + 4) + 5 set-byte 0x1F @@ -240,7 +240,7 @@ type-enc { #fnptr #field 10 is a fnptr #format is: cconv FNPTR cconv pcount ret param* sentinel? 
param* - #LAMESPEC, it lacks the fact that fnptr allows for unmanaged call conv + #LAMESPEC, it lacks the fact that fnptr allows for unmanaged call conv #bad callconv invalid offset blob.i (table-row (0x04 10) + 4) + 3 set-byte 0x88 @@ -263,10 +263,10 @@ typespec-sig { #type zero is invalid invalid offset blob.i (table-row (0x1B 0)) + 1 set-byte 0x0 - #LAMESPEC part II, MS allows for cmods on a typespec as well + #LAMESPEC part II, MS allows for cmods on a typespec as well #modreq int32 is invalid #typespec 2 is "modreq int32*" encoded as: PTR CMOD_REQD token INT32 - #change int to CMOD_REQD token INT32 + #change int to CMOD_REQD token INT32 valid offset blob.i (table-row (0x1B 2)) + 1 set-byte 0x1f, #CMOD_REQD offset blob.i (table-row (0x1B 2)) + 2 set-byte read.byte (blob.i (table-row (0x1B 2)) + 3), #token offset blob.i (table-row (0x1B 2)) + 3 set-byte 0x08 #int8 @@ -321,7 +321,7 @@ method-header { #bad fat header flags #only 0x08 and 0x10 allowed - #regular value is + #regular value is invalid offset translate.rva.ind (table-row (0x06 1)) + 0 set-ushort 0x3033 #or 0x20 invalid offset translate.rva.ind (table-row (0x06 1)) + 0 set-ushort 0x3053 invalid offset translate.rva.ind (table-row (0x06 1)) + 0 set-ushort 0x3093 diff --git a/src/mono/mono/tests/metadata-verifier/cli-cattr-tests.md b/src/mono/mono/tests/metadata-verifier/cli-cattr-tests.md index ac6849b8a15b7..7d852b3a5f800 100644 --- a/src/mono/mono/tests/metadata-verifier/cli-cattr-tests.md +++ b/src/mono/mono/tests/metadata-verifier/cli-cattr-tests.md @@ -10,7 +10,7 @@ cattr-without-named-args { #WARNING: peverify don't check custom attributes format beyond the prolog #so it's pointless to use it for this. #We'll take the easy road as well and when verifying the encoded data - #assume that the target constructor can be decoded and use the runtime signature. + #assume that the target constructor can be decoded and use the runtime signature. 
#bad size invalid offset blob.i (table-row (0x0C 0) + 4) + 0 set-byte 0x0 diff --git a/src/mono/mono/tests/metadata-verifier/cli-global-props-tests.md b/src/mono/mono/tests/metadata-verifier/cli-global-props-tests.md index ccbdca2e319b8..481113390c385 100644 --- a/src/mono/mono/tests/metadata-verifier/cli-global-props-tests.md +++ b/src/mono/mono/tests/metadata-verifier/cli-global-props-tests.md @@ -24,4 +24,4 @@ fielddef-global-props { badrt offset table-row (4 1) + 2 set-ushort read.ushort (table-row (4 0) + 2), #name offset table-row (4 1) + 4 set-ushort read.ushort (table-row (4 0) + 4) #signature -} \ No newline at end of file +} diff --git a/src/mono/mono/tests/metadata-verifier/cli-metadata-tests.md b/src/mono/mono/tests/metadata-verifier/cli-metadata-tests.md index 1ce3b3e187a99..fd04a97195660 100644 --- a/src/mono/mono/tests/metadata-verifier/cli-metadata-tests.md +++ b/src/mono/mono/tests/metadata-verifier/cli-metadata-tests.md @@ -54,6 +54,6 @@ cli-metadata-stream-headers { #unkwnown name invalid offset stream-header ( 0 ) + 8 set-byte 0x42 - #duplicate name, change #~ to #US + #duplicate name, change #~ to #US invalid offset stream-header ( 0 ) + 9 set-byte 0x55 , offset stream-header ( 0 ) + 10 set-byte 0x53 -} \ No newline at end of file +} diff --git a/src/mono/mono/tests/metadata-verifier/cli-tables-tests.md b/src/mono/mono/tests/metadata-verifier/cli-tables-tests.md index cec99ac9f9d0e..5412ec803ef0e 100644 --- a/src/mono/mono/tests/metadata-verifier/cli-tables-tests.md +++ b/src/mono/mono/tests/metadata-verifier/cli-tables-tests.md @@ -5,7 +5,7 @@ tables-header { valid offset cli-metadata + read.uint ( stream-header ( 0 ) ) + 4 set-byte 2 valid offset tables-header + 4 set-byte 2 - #major/minor versions + #major/minor versions invalid offset tables-header + 4 set-byte 22 invalid offset tables-header + 5 set-byte 1 @@ -67,7 +67,7 @@ module-table { valid offset tables-header + 24 set-uint 1 invalid offset tables-header + 24 set-uint 0 invalid offset tables-header + 24 set-uint 2 , offset tables-header + 32 set-uint 1 - + #name #invalid string invalid offset table-row ( 0 0 ) + 2 set-ushort 0x8888 @@ -120,7 +120,7 @@ typedef-table { valid offset tables-header + 32 set-uint 2 invalid offset tables-header + 32 set-uint 0 - #This part of the test suite only verifies structural properties, not table relationships + #This part of the test suite only verifies structural properties, not table relationships #Flags invalid bits: 6,9,14,15,19,21,24-31 invalid offset table-row ( 2 1 ) set-bit 6 @@ -186,7 +186,7 @@ typedef-table-field-list { valid offset table-row ( 2 1 ) + 10 set-ushort 1 - #bad field list + #bad field list invalid offset table-row ( 2 1 ) + 10 set-ushort 999 #this type is bigger than the next @@ -202,7 +202,7 @@ typedef-table-method-list { valid offset table-row ( 2 1 ) + 12 set-ushort 1 - #bad field list + #bad field list invalid offset table-row ( 2 1 ) + 12 set-ushort 999 #this type is bigger than the next @@ -262,11 +262,11 @@ field-table { #if it's a global variable, it must be static and (public|compiler controler|private) (16) #static + compiler controled - valid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x10 + valid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x10 #static + private valid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x11 #static + public - valid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x16 + 
valid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x16 #static + bad visibility #LAMEIMPL MS doesn't verify visibility invalid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x12 @@ -275,7 +275,7 @@ field-table { invalid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x15 #public and not static - invalid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x06 + invalid offset table-row ( 2 1 ) + 10 set-ushort 2 , offset table-row ( 4 0 ) set-ushort 0x06 #field is constant but has no row in the contant table #LAMESPEC this check is missing from the spec @@ -300,7 +300,7 @@ methoddef-table { #bad flags (4) #no unused bits - + #invalid .ctor with generic params and specialname (6) #method 0 is a .ctor, method 1 is generic invalid offset table-row ( 6 1 ) + 6 or-ushort 0x1800 , offset table-row ( 6 1 ) + 8 set-ushort read.ushort ( table-row ( 6 0 ) + 8 ) @@ -343,8 +343,8 @@ methoddef-table { #Interface cannot have .ctors (15) #method 3 belongs to an inteface invalid offset table-row ( 6 3 ) + 8 set-ushort read.ushort ( table-row ( 6 0 ) + 8 ) - #Interface methods can't be static - invalid offset table-row ( 6 3 ) + 6 or-ushort 0x0010 + #Interface methods can't be static + invalid offset table-row ( 6 3 ) + 6 or-ushort 0x0010 #XXX we don't care about CLS names (17) @@ -377,7 +377,7 @@ methoddef-table { invalid offset table-row ( 6 5 ) set-uint read.uint ( table-row ( 6 2 ) ) #pinvoke with runtime - #LAMEIMPL/SPEC either MS ignores it or the spec is ill defined + #LAMEIMPL/SPEC either MS ignores it or the spec is ill defined #invalid offset table-row ( 6 5 ) + 4 or-ushort 0x1000 #if compilercontroled (0x0) it must have an RVA or a pinvoke @@ -389,7 +389,7 @@ methoddef-table { #if RVA = 0 then one of (abstract, runtime, pinvoke) (34) #let's test with an abstract class, method 6 is abstract and belongs to one. 
invalid offset table-row ( 6 7 ) + 6 set-ushort 0x0006 - #icall + #icall valid offset table-row ( 6 7 ) + 6 set-ushort 0x01c6 , offset table-row ( 6 7 ) + 4 or-ushort 0x1000 #if rva != 0 then abstract == 0 and codetypemask must be (native,cil,runtime) and rva shall be valid (35) @@ -429,7 +429,7 @@ methoddef-table-global-methods { assembly assembly-with-global-method.exe #checks for methods owned by (20) - + #static + public valid offset table-row ( 6 0 ) + 6 set-ushort 0x0010 #static + private @@ -458,10 +458,10 @@ methoddef-table-global-methods { methoddef-table-params { assembly assembly-with-methods.exe - #method 12,13,14 have 3 params and params: 2,5,8 + #method 12,13,14 have 3 params and params: 2,5,8 #method 13 has 3 params and params: 5 invalid offset table-row ( 6 12 ) + 12 set-ushort 6 - invalid offset table-row ( 6 13 ) + 12 set-ushort 99 + invalid offset table-row ( 6 13 ) + 12 set-ushort 99 } @@ -489,7 +489,7 @@ param-table { invalid offset table-row ( 8 0 ) + 2 set-ushort 2 invalid offset table-row ( 8 1 ) + 2 set-ushort 1 - + #if HasDefault = 1 then there must be a row in the constant table (6) #param 2 doesn't have a default invalid offset table-row ( 8 2 ) or-ushort 0x1000 @@ -534,7 +534,7 @@ interfaceimpl-table { memberref-table { assembly assembly-with-complex-type.exe - + #class must be a valid token (1 2) #null invalid offset table-row ( 10 0 ) set-ushort 0 @@ -555,13 +555,13 @@ memberref-table { #signature must be valid (5) invalid offset table-row ( 10 0 ) + 4 set-ushort 0x9900 - + #TODO validate the signature (5) #LAMESPEC CompilerControled visibility (9,10) is nice but no impl care about - #LAMESPEC what does (11) mean? + #LAMESPEC what does (11) mean? } constant-table { @@ -588,7 +588,7 @@ constant-table { #First remove default from param 'a' (param table idx 0) #Then set the has default flag in the property table #Finally, make the first constant point from the part to the property (const 1, prop 0, token 0x6) - valid offset table-row ( 0x8 0 ) set-ushort 0 , offset table-row ( 0x17 0 ) or-ushort 0x1000 , offset table-row ( 0xB 1 ) + 2 set-ushort 0x6 + valid offset table-row ( 0x8 0 ) set-ushort 0 , offset table-row ( 0x17 0 ) or-ushort 0x1000 , offset table-row ( 0xB 1 ) + 2 set-ushort 0x6 #Invalid coded table invalid offset table-row ( 0xB 0 ) + 2 set-ushort 0x0013 , offset table-row ( 0x04 0 ) set-ushort 0x16 @@ -608,7 +608,7 @@ constant-table { invalid offset table-row ( 0xB 0 ) + 4 set-ushort read.uint ( stream-header ( 3 ) + 4 ) #LAMEIMPL, MS doesn't bound check the constant size. Lame of them. - invalid offset table-row ( 0xB 0 ) + 4 set-ushort read.uint ( stream-header ( 3 ) + 4 ) - 1 + invalid offset table-row ( 0xB 0 ) + 4 set-ushort read.uint ( stream-header ( 3 ) + 4 ) - 1 } cattr-table { @@ -708,7 +708,7 @@ class-layout-table { #packing must be (0,1,2,4,8,16,32,64,128) (4) invalid offset table-row ( 0xF 0 ) set-ushort 0x0003 - #TODO do checks depending on the kind of parent (4) + #TODO do checks depending on the kind of parent (4) #Check layout along the inheritance chain. 
(7) } @@ -753,7 +753,7 @@ event-table { assembly assembly-with-events.exe #event flags have valid bits (3) - #only bits 9 and 10 are used + #only bits 9 and 10 are used invalid offset table-row ( 0x14 0 ) set-bit 0 invalid offset table-row ( 0x14 0 ) set-bit 1 @@ -816,7 +816,7 @@ property-table { assembly assembly-with-properties.exe #valid flags (3) - #only bits 9, 10 and 12 are used + #only bits 9, 10 and 12 are used invalid offset table-row ( 0x17 0 ) set-bit 0 invalid offset table-row ( 0x17 0 ) set-bit 1 invalid offset table-row ( 0x17 0 ) set-bit 2 @@ -846,7 +846,7 @@ property-table { #field zero has default value valid offset table-row (0x17 0) + 0 or-ushort 0x1000, #mark the property with hasdefault offset table-row (0x04 0) + 0 set-ushort 0x0011, #clear literal and hasdefault from the field - offset table-row (0x0B 0) + 2 set-ushort 0x0006 #change the parent token to row 1 of the property table (0x2) + offset table-row (0x0B 0) + 2 set-ushort 0x0006 #change the parent token to row 1 of the property table (0x2) invalid offset table-row (0x17 0) + 0 or-ushort 0x1000 @@ -857,32 +857,32 @@ methodimpl-table { assembly assembly-with-complex-type.exe #class shall be valid (2) - invalid offset table-row (0x19 0) set-ushort 0 + invalid offset table-row (0x19 0) set-ushort 0 invalid offset table-row (0x19 0) set-ushort 0x8800 #methodbody shall be valid (3) #null - invalid offset table-row (0x19 0) + 2 set-ushort 0x0000 + invalid offset table-row (0x19 0) + 2 set-ushort 0x0000 invalid offset table-row (0x19 0) + 2 set-ushort 0x0001 #out of range - invalid offset table-row (0x19 0) + 2 set-ushort 0x8800 + invalid offset table-row (0x19 0) + 2 set-ushort 0x8800 invalid offset table-row (0x19 0) + 2 set-ushort 0x8801 #MethodDeclaration shall be valid #null - invalid offset table-row (0x19 0) + 4 set-ushort 0x0000 + invalid offset table-row (0x19 0) + 4 set-ushort 0x0000 invalid offset table-row (0x19 0) + 4 set-ushort 0x0001 #out of range - invalid offset table-row (0x19 0) + 4 set-ushort 0x8800 + invalid offset table-row (0x19 0) + 4 set-ushort 0x8800 invalid offset table-row (0x19 0) + 4 set-ushort 0x8801 - - #TODO check MethodDeclaration method for virtual and owner type for !sealed (4,5) + + #TODO check MethodDeclaration method for virtual and owner type for !sealed (4,5) #TODO check MethodBody for belonging to a super type of Class,been virtual and rva != 0 (6,7,8) #TODO check MethodBody must belong to any ancestor or iface of Class (9) #TODO check MethodDeclaration method shall not be final (10) #TODO if MethodDeclaration is strict, it must be visible to Class (11) - #TODO the method signature of MethodBody must match of MethodDeclaration (12) + #TODO the method signature of MethodBody must match of MethodDeclaration (12) #TODO no dups } @@ -959,7 +959,7 @@ fieldrva-table { invalid offset table-row (0x1D 0) + 4 set-ushort 0, offset table-row (0x04 17) set-ushort 0x0013 #remove fieldrva from target field invalid offset table-row (0x1D 0) + 4 set-ushort 0x9901, - offset table-row (0x04 17) set-ushort 0x0013 + offset table-row (0x04 17) set-ushort 0x0013 #TODO verify if the field is a blitable valuetype @@ -974,7 +974,7 @@ assembly-table { invalid offset tables-header + 40 set-uint 2, offset stream-header (0) + 4 set-uint read.uint (stream-header (0) + 4) + 22 #increase the size of the #~ section - #bad hasalg (2) + #bad hasalg (2) valid offset table-row (0x20 0) set-uint 0 valid offset table-row (0x20 0) set-uint 0x8003 valid offset table-row (0x20 0) set-uint 0x8004 @@ -997,7 +997,7 @@ 
assembly-table { #valid pub key (5) - valid offset table-row (0x20 0) + 16 set-ushort 0 + valid offset table-row (0x20 0) + 16 set-ushort 0 invalid offset table-row (0x20 0) + 16 set-ushort 0x9990 #name is a valid non-empty string (5) @@ -1005,7 +1005,7 @@ assembly-table { invalid offset table-row (0x20 0) + 18 set-ushort 0x9990 #culture is an optional valid non-empty string (8) - valid offset table-row (0x20 0) + 20 set-ushort 0 + valid offset table-row (0x20 0) + 20 set-ushort 0 invalid offset table-row (0x20 0) + 20 set-ushort 0x9990 #TODO check if culture is one of the listed cultures (9) (23.1.3) @@ -1036,7 +1036,7 @@ assembly-ref-table { invalid offset table-row (0x23 0) + 14 set-ushort 0 #culture is an optional valid non-empty string (6) - valid offset table-row (0x23 0) + 16 set-ushort 0 + valid offset table-row (0x23 0) + 16 set-ushort 0 invalid offset table-row (0x23 0) + 16 set-ushort 0x9990 #TODO check if culture is one of the listed cultures (7) (23.1.3) @@ -1104,11 +1104,11 @@ exported-type-table { #if Implementation points to exported type table visibility must be nested public (5) #invalid offset table-row (0x27 1) set-uint 0x100005 #LAMEIMPL/SPEC this check is not really relevant - + #typename is a valid non-empty string (7) invalid offset table-row (0x27 0) + 8 set-ushort 0 invalid offset table-row (0x27 0) + 8 set-ushort 0x9900 - + #typenamedpace is a valid string (8,9) invalid offset table-row (0x27 0) + 10 set-ushort 0x9900 @@ -1151,13 +1151,13 @@ manifest-resource-table { valid offset table-row (0x28 0) + 10 set-ushort 0, offset table-row (0x28 0) + 0 set-uint 1 - #LAMEIMPL it doesn't check the resource offset! + #LAMEIMPL it doesn't check the resource offset! invalid offset table-row (0x28 0) + 10 set-ushort 0, offset table-row (0x28 0) + 0 set-uint 0x990000 - + #implementation is a valid token (8) - #does it accept exported type? + #does it accept exported type? 
invalid offset table-row (0x28 0) + 10 set-ushort 0x0006 #coded table 4 is invalid @@ -1169,7 +1169,7 @@ manifest-resource-table { #if implementation point to a file it's index must be zero (10) #row 0 is a file resource invalid offset table-row (0x28 0) set-uint 1 - + #TODO check for dups (9) } @@ -1185,7 +1185,7 @@ nested-class-table { invalid offset table-row (0x29 0) + 2 set-ushort read.ushort (table-row (0x29 0)) - #TODO check for dups based on nestedclass (5) + #TODO check for dups based on nestedclass (5) } @@ -1214,7 +1214,7 @@ generic-param-table { invalid offset table-row (0x2A 0) + 4 set-ushort 0x8800 invalid offset table-row (0x2A 0) + 4 set-ushort 0x8801 - #bad or empty name + #bad or empty name invalid offset table-row (0x2A 0) + 6 set-ushort 0 invalid offset table-row (0x2A 0) + 6 set-ushort 0x8800 diff --git a/src/mono/mono/tests/metadata-verifier/data-directory-tests.md b/src/mono/mono/tests/metadata-verifier/data-directory-tests.md index 03be4f38c5fcf..d58e567e9b93d 100644 --- a/src/mono/mono/tests/metadata-verifier/data-directory-tests.md +++ b/src/mono/mono/tests/metadata-verifier/data-directory-tests.md @@ -3,18 +3,18 @@ pe-data-directories-export-table { assembly simple-assembly.exe #zero is fine - valid offset pe-optional-header + 96 set-uint 0 + valid offset pe-optional-header + 96 set-uint 0 valid offset pe-optional-header + 100 set-uint 0 #RVA must be zero - invalid offset pe-optional-header + 96 set-uint 0x2000 , offset pe-optional-header + 100 set-uint 10 + invalid offset pe-optional-header + 96 set-uint 0x2000 , offset pe-optional-header + 100 set-uint 10 } pe-data-directories-import-table { #Simple assembly has 2 sections since it doesn't have any resources assembly simple-assembly.exe - + #The IT is 40 bytes long invalid offset pe-optional-header + 108 set-uint 0 invalid offset pe-optional-header + 108 set-uint 8 @@ -34,35 +34,35 @@ pe-data-directories-bad-tables { #export invalid offset pe-optional-header + 96 set-uint 0x2000 - #exception - invalid offset pe-optional-header + 120 set-uint 0x2000 + #exception + invalid offset pe-optional-header + 120 set-uint 0x2000 #certificate some assemblies have it. 
- #invalid offset pe-optional-header + 128 set-uint 0x2000 + #invalid offset pe-optional-header + 128 set-uint 0x2000 - #debug MS uses it for putting debug info in the assembly - #invalid offset pe-optional-header + 144 set-uint 0x2000 + #debug MS uses it for putting debug info in the assembly + #invalid offset pe-optional-header + 144 set-uint 0x2000 - #copyright - invalid offset pe-optional-header + 152 set-uint 0x2000 + #copyright + invalid offset pe-optional-header + 152 set-uint 0x2000 - #global ptr - invalid offset pe-optional-header + 160 set-uint 0x2000 + #global ptr + invalid offset pe-optional-header + 160 set-uint 0x2000 - #tls table - invalid offset pe-optional-header + 168 set-uint 0x2000 + #tls table + invalid offset pe-optional-header + 168 set-uint 0x2000 - #load config - invalid offset pe-optional-header + 176 set-uint 0x2000 + #load config + invalid offset pe-optional-header + 176 set-uint 0x2000 - #bound import - invalid offset pe-optional-header + 184 set-uint 0x2000 + #bound import + invalid offset pe-optional-header + 184 set-uint 0x2000 #delay import - invalid offset pe-optional-header + 200 set-uint 0x2000 + invalid offset pe-optional-header + 200 set-uint 0x2000 #reserved import - invalid offset pe-optional-header + 216 set-uint 0x2000 + invalid offset pe-optional-header + 216 set-uint 0x2000 } diff --git a/src/mono/mono/tests/metadata-verifier/header-tests.md b/src/mono/mono/tests/metadata-verifier/header-tests.md index 4534ba2299da4..4ce8616a6a74c 100644 --- a/src/mono/mono/tests/metadata-verifier/header-tests.md +++ b/src/mono/mono/tests/metadata-verifier/header-tests.md @@ -24,21 +24,21 @@ msdos-lfanew { invalid offset 0x3f truncate #not enough space for the PE water mark - invalid offset 0x3c set-uint 0xffffffff - invalid offset 0x3c set-uint file-size - 1 + invalid offset 0x3c set-uint 0xffffffff + invalid offset 0x3c set-uint file-size - 1 invalid offset 0x3c set-uint file-size - 2 } pe-signature { assembly simple-assembly.exe - valid offset pe-signature + 0 set-byte 'P' - valid offset pe-signature + 1 set-byte 'E' + valid offset pe-signature + 0 set-byte 'P' + valid offset pe-signature + 1 set-byte 'E' valid offset pe-signature + 2 set-byte 0 valid offset pe-signature + 3 set-byte 0 - invalid offset pe-signature + 0 set-byte 'M' - invalid offset pe-signature + 1 set-byte 'K' + invalid offset pe-signature + 0 set-byte 'M' + invalid offset pe-signature + 1 set-byte 'K' invalid offset pe-signature + 2 set-byte 1 invalid offset pe-signature + 3 set-byte 2 @@ -100,7 +100,7 @@ pe-optional-header-standard-fields { valid offset pe-optional-header + 3 set-byte 0 valid offset pe-optional-header + 3 set-byte 99 - + #Code size is just an informative field as well, nobody cares valid offset pe-optional-header + 4 set-uint 0 valid offset pe-optional-header + 4 set-uint 0x999999 diff --git a/src/mono/mono/tests/metadata-verifier/resources-tests.md b/src/mono/mono/tests/metadata-verifier/resources-tests.md index 2443d1f5d06a2..06f5d1e09403f 100644 --- a/src/mono/mono/tests/metadata-verifier/resources-tests.md +++ b/src/mono/mono/tests/metadata-verifier/resources-tests.md @@ -15,4 +15,4 @@ resources-master-directory { invalid offset translate.rva.ind ( pe-optional-header + 112 ) + 14 set-ushort 0x9999 #I won't check anything more than that for now as this is only used by out asp.net stack. 
-} \ No newline at end of file +} diff --git a/src/mono/mono/tests/metadata-verifier/section-table-tests.md b/src/mono/mono/tests/metadata-verifier/section-table-tests.md index 07f85ecda0e4f..8bb851119320e 100644 --- a/src/mono/mono/tests/metadata-verifier/section-table-tests.md +++ b/src/mono/mono/tests/metadata-verifier/section-table-tests.md @@ -25,7 +25,7 @@ pe-section-headers { #VirtualSize = file size + PointerToRawData + 32 invalid offset section-table + 16 set-uint file-size - read.uint ( section-table + 20 ) + 32 invalid offset section-table + 56 set-uint file-size - read.uint ( section-table + 60 ) + 32 - + invalid offset section-table + 60 set-uint 90000 #FIXME add section relocation tests @@ -35,12 +35,12 @@ pe-section-header-flags { #Simple assembly has 2 sections since it doesn't have any resources assembly simple-assembly.exe - #first section is always text + #first section is always text valid offset section-table + 36 set-uint 0x60000020 valid offset section-table + 76 set-uint 0x42000040 - + invalid offset section-table + 36 set-uint 0 invalid offset section-table + 36 set-uint 0xFFFFFFFF -} \ No newline at end of file +} diff --git a/src/tests/Interop/ReadMe.md b/src/tests/Interop/ReadMe.md index dc61c7afc87f9..debaeb759e657 100644 --- a/src/tests/Interop/ReadMe.md +++ b/src/tests/Interop/ReadMe.md @@ -53,7 +53,7 @@ Testing P/Invoke has two aspects: The Marshal API surface area testing is traditionally done via unit testing and far better suited in the [library test folder](https://github.com/dotnet/runtime/tree/master/src/libraries/System.Runtime.InteropServices/tests). Cases where testing the API surface area requires native tests assets will be performed in the [coreclr test folder](https://github.com/dotnet/runtime/tree/master/src/coreclr/tests/src/Interop) repo. -### NativeLibrary +### NativeLibrary This series has unit tests corresponding to `System.Runtime.NativeLibrary` APIs and related events in `System.Runtime.Loader.AssemblyLoadContext`. diff --git a/src/tests/performance/Scenario/JitBench/unofficial_dotnet/README.md b/src/tests/performance/Scenario/JitBench/unofficial_dotnet/README.md index 5e6b5fec1f94c..a4f25ca579d66 100644 --- a/src/tests/performance/Scenario/JitBench/unofficial_dotnet/README.md +++ b/src/tests/performance/Scenario/JitBench/unofficial_dotnet/README.md @@ -7,8 +7,8 @@ JitBench is a collection of scenario benchmarks that were originally designed to Execute 'dotnet run' in this directory. The test should eventually produce output like this: - - + + === CONFIGURATION === DotnetFrameworkVersion: 2.1.0-preview2-26131-06 @@ -97,12 +97,12 @@ In the Benchmarks folder create a new .cs file that implements a class deriving - ExePath - WorkingDirPath -- EnvironmentVariables (optional) +- EnvironmentVariables (optional) to determine what process will be invoked later when the benchmark runs. BuildHelloWorldBenchmark.cs is a simple example if you need a template to copy. MusicStore is a bit more sophisticated and shows gathering custom metrics + customizing the Benchview output. ## Automation -This how we currently setup to run the test in CI and then retrieve its results. +This how we currently setup to run the test in CI and then retrieve its results. 
**Setup:** diff --git a/src/tests/profiler/native/README.md b/src/tests/profiler/native/README.md index 439db297539b5..2c3f78d1ec044 100644 --- a/src/tests/profiler/native/README.md +++ b/src/tests/profiler/native/README.md @@ -47,4 +47,4 @@ When you want to test new profiler APIs you will need a new test profiler implem // add new profilers here }; -3) Override the profiler callback functions that are relevant for your test and delete the rest. At minimum you will need to ensure that the test prints the phrase "PROFILER TEST PASSES" at some point to indicate this is a passing test. Typically that occurs in the Shutdown() method. It is also likely you want to override Initialize() in order to call SetEventMask so that the profiler receives events. \ No newline at end of file +3) Override the profiler callback functions that are relevant for your test and delete the rest. At minimum you will need to ensure that the test prints the phrase "PROFILER TEST PASSES" at some point to indicate this is a passing test. Typically that occurs in the Shutdown() method. It is also likely you want to override Initialize() in order to call SetEventMask so that the profiler receives events. diff --git a/tools-local/dotnet-deb-tool/README.md b/tools-local/dotnet-deb-tool/README.md index 1cc7270486c2b..7a6a2f0408929 100644 --- a/tools-local/dotnet-deb-tool/README.md +++ b/tools-local/dotnet-deb-tool/README.md @@ -1,6 +1,6 @@ # Debian Package Tool -This is a tool which simplifies the creation process of a debian package. +This is a tool which simplifies the creation process of a debian package. Use of this tool requires creating a json configuration, and appropriate directory structure with your desired files to be included. @@ -8,7 +8,7 @@ directory structure with your desired files to be included. ## Usage ``` -Usage: package_tool [-i ] [-o ] +Usage: package_tool [-i ] [-o ] [-n ] [-v ] [-h] REQUIRED: @@ -65,7 +65,7 @@ Note: Use the commentless version [here](example_config.json). "control": { // (optional) "priority":"standard", // (optional default="standard") https://www.debian.org/doc/debian-policy/ch-archive.html#s-priorities "section":"devel", // (optional default="misc") https://www.debian.org/doc/debian-policy/ch-archive.html#s-subsections - "architecture":"all" // (optional default="all" ) + "architecture":"all" // (optional default="all" ) }, "copyright": "2015 Microsoft", // [required] @@ -78,10 +78,10 @@ Note: Use the commentless version [here](example_config.json). 
"package_name": { "package_version" : "1.0.0" // (optional within package_name no default) } - }, + }, "symlinks": { // (optional no defaults) - "path_relative_to_package_root/test_exe.sh" : "usr/bin/test_exe.sh" + "path_relative_to_package_root/test_exe.sh" : "usr/bin/test_exe.sh" } } ``` From 29e7d8e293c5172b87874aaf5a2915cadb3f74d7 Mon Sep 17 00:00:00 2001 From: Youssef Victor Date: Sat, 15 Aug 2020 20:36:34 +0200 Subject: [PATCH 06/23] Delete workflows --- .../markdownlint-problem-matcher.json | 17 ---------- .github/workflows/markdownlint.yml | 32 ------------------- .markdownlint.json | 6 ---- 3 files changed, 55 deletions(-) delete mode 100644 .github/workflows/markdownlint-problem-matcher.json delete mode 100644 .github/workflows/markdownlint.yml delete mode 100644 .markdownlint.json diff --git a/.github/workflows/markdownlint-problem-matcher.json b/.github/workflows/markdownlint-problem-matcher.json deleted file mode 100644 index f0741f6b90626..0000000000000 --- a/.github/workflows/markdownlint-problem-matcher.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "problemMatcher": [ - { - "owner": "markdownlint", - "pattern": [ - { - "regexp": "^([^:]*):(\\d+):?(\\d+)?\\s([\\w-\\/]*)\\s(.*)$", - "file": 1, - "line": 2, - "column": 3, - "code": 4, - "message": 5 - } - ] - } - ] -} diff --git a/.github/workflows/markdownlint.yml b/.github/workflows/markdownlint.yml deleted file mode 100644 index 8e7a9afb70594..0000000000000 --- a/.github/workflows/markdownlint.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Markdownlint - -on: - push: - paths: - - "**/*.md" - - ".markdownlint.json" - - ".github/workflows/markdownlint.yml" - - ".github/workflows/markdownlint-problem-matcher.json" - pull_request: - paths: - - "**/*.md" - - ".markdownlint.json" - - ".github/workflows/markdownlint.yml" - - ".github/workflows/markdownlint-problem-matcher.json" - -jobs: - lint: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - name: Use Node.js - uses: actions/setup-node@v1 - with: - node-version: 12.x - - name: Run Markdownlint - run: | - echo "::add-matcher::.github/workflows/markdownlint-problem-matcher.json" - npm i -g markdownlint-cli - markdownlint "**/*.md" diff --git a/.markdownlint.json b/.markdownlint.json deleted file mode 100644 index 3cff3f10aba03..0000000000000 --- a/.markdownlint.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "default": false, - "MD009": { - "br_spaces": 0 - }, -} From 36f6ab99a6ab84b55bd64af888a194631afcb1c0 Mon Sep 17 00:00:00 2001 From: Youssef Victor Date: Sun, 16 Aug 2020 09:59:46 +0200 Subject: [PATCH 07/23] Some manual fixes --- docs/design/coreclr/jit/jit-call-morphing.md | 2 -- .../profiling/davbr-blog-archive/Attach.md | 20 +++++++------------ .../profiling/davbr-blog-archive/Attach2.md | 2 -- ...ckSnapshot - Callback CONTEXT Registers.md | 9 +++------ .../DoStackSnapshot - Exception Filters.md | 6 +++--- .../Generics and Your Profiler.md | 4 ---- ... Tokens, Run-Time IDs, and Type Loading.md | 4 ---- .../davbr-blog-archive/ReJIT - The Basics.md | 10 ---------- 8 files changed, 13 insertions(+), 44 deletions(-) diff --git a/docs/design/coreclr/jit/jit-call-morphing.md b/docs/design/coreclr/jit/jit-call-morphing.md index c454690e21df2..5c99ac2939125 100644 --- a/docs/design/coreclr/jit/jit-call-morphing.md +++ b/docs/design/coreclr/jit/jit-call-morphing.md @@ -17,7 +17,6 @@ post/pre increment, perhaps like this: `Foo(j, a[j++])`. Here `j` is updated vi when the second arg is evaluated, so the earlier uses of `j` would need to be evaluated and saved in a new LclVar. 
- One simple approach would be to create new single definition, single use LclVars for every argument that is passed. This would preserve the evaluation order. However, it would potentially create hundreds of LclVar for moderately sized methods and that would overflow the limited number of @@ -25,7 +24,6 @@ tracked local variables in the JIT. One observation is that many arguments to m either constants or LclVars and can be set up anytime we want. They usually will not need a new LclVar to preserve the order of evaluation rule. - Each argument is an arbitrary expression tree. The JIT tracks a summary of observable side-effects using a set of five bit flags in every GenTree node: `GTF_ASG`, `GTF_CALL`, `GTF_EXCEPT`, `GTF_GLOB_REF`, and `GTF_ORDER_SIDEEFF`. These flags are propagated up the tree so that the top node has a particular diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md b/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md index 53f17e2316c3d..f9f08680f24f6 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Attach.md @@ -7,8 +7,6 @@ Profiler attach is a feature that allows you to attach a profiler to an already Please note! You can't just take any profiler you bought and suddenly be able to attach it to a running application. The profiler must be built with "attachability" in mind. So if you're a profiler developer looking to pump some attachability into your product, read on--this article is for you. Everyone else, this article will probably be less useful--but just as riveting. -# - # The Players So how do you get your profiler attached to a running process? The process has already started, and the CLR code which interrogates the environment to determine whether to load a profiler has already run. So how do you kick the process into loading your profiler? The answer: Another process! @@ -57,8 +55,6 @@ From your InitializeForAttach implementation, your profiler will call SetEventMa It was impossible to enable all profiling scenarios for attach in the time we had for the V4 release. So only profilers that do **sampling** and **memory** analysis will function properly after attaching to a live process. Attempts to use other profiling APIs after attach will be met with CORPROF\_E\_UNSUPPORTED\_FOR\_ATTACHING\_PROFILER. -### - ## Specific Callback Limitations When your attaching profiler calls SetEventMask, you will be limited to only those event mask flags present in the COR\_PRF\_ALLOWABLE\_AFTER\_ATTACH bitmask (you'll find it in corprof.idl). Any other flags, and SetEventMask will return CORPROF\_E\_UNSUPPORTED\_FOR\_ATTACHING\_PROFILER. @@ -96,15 +92,13 @@ So here's the catch. What if a V4 app starts up in background GC mode _without_ Of course, you could forcibly turn off concurrent / background mode every time the app starts up via a config file: -| - -\ - \ - \ - \ -\ - - | +```xml + + + + + +``` But you don't really want to be running your apps with a sub-optimal GC mode all the time, just on the off-chance you might need to attach a memory profiler to it. If you suspect you might need to do some memory profiling of a client app, you should just start up your app with the memory profiler to begin with. 
diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md b/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md index 10847f05b9c74..6ff0409ebebd9 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Attach2.md @@ -151,7 +151,5 @@ It may be beneficial to program your profiler such that, upon attaching to the p It’s worth reiterating a limitation I stated in the first attach post (linked above): the ObjectAllocated() callback is unavailable to profilers that attach to running processes. Therefore, any logic your profiler has that assumes it gets all the ObjectAllocated() callbacks will need to be addressed. Any objects newly allocated since the last GC may still be unknown to your profiler until it comes across their references via GC callbacks during the next GC (unless your profiler comes across those objects in other ways—example: as parameters to methods you hook with the Enter/Leave/Tailcall probes). - - OK, that about covers the first steps your profiler should take once it attaches to a running process. It will either need to use lazy catch-up or the catch-up enumerations (or, quite likely, a combination of both). When using the enumerations, be careful to avoid holes (by calling the enumeration methods from inside ProfilerAttachComplete()), and be resilient to receiving information duplicated across the enumeration and the load / unload events. For memory profilers, be wary of GCs already in progress at the time your profiler attaches, and consider inducing your own GC at attach-time to build your initial cache of GC objects. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md index ede4e736d3995..784e65b09103b 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Callback CONTEXT Registers.md @@ -5,9 +5,6 @@ In my initial [post](DoStackSnapshot - Exception Filters.md) about DoStackSnapsh The quick answer is that **nonvolatile (i.e., preserved), integer registers** should be valid. You don't really need many registers to walk the stack anyway. Obviously, you want a good stack pointer and instruction pointer. And hey, a frame pointer is handy when you come across an EBP-based frame in x86 (RBP on x64). These are all included in the set, of course. Specifically by architecture, you can trust these fields in your context: -x86: Edi, Esi, Ebx, Ebp, Esp, Eip -x64: Rdi, Rsi, Rbx, Rbp, Rsp, Rip, R12:R15 -ia64: IntS0:IntS3, RsBSP, StIFS, RsPFS, IntSp, StIIP, StIPSR - - - +- x86: Edi, Esi, Ebx, Ebp, Esp, Eip +- x64: Rdi, Rsi, Rbx, Rbp, Rsp, Rip, R12:R15 +- ia64: IntS0:IntS3, RsBSP, StIFS, RsPFS, IntSp, StIIP, StIPSR diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md index 6c7e57eb53d05..7e9a9484f3fd0 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/DoStackSnapshot - Exception Filters.md @@ -35,9 +35,9 @@ The filters are the things that come after "When". 
We all know that, when an exc The thing you need to realize about DoStackSnapshot's behavior (indeed, CLR in general) is that the execution of a When clause is really a separate function call. In the above example, imagine we take a stack snapshot while inside Positive(). Our managed-only stack trace, as reported by DoStackSnapshot, would then look like this (stack grows up): -Positive -Main -Thrower +Positive\ +Main\ +Thrower\ Main It's that highlighted Main that seems odd at first. While the exception is thrown inside Thrower(), the CLR needs to execute the filter clauses to figure out which Catch wins. These filter executions are actually _function calls_. Since filter clauses don't have their own names, we just use the name of the function containing the filter clause for stack reporting purposes. Thus, the highlighted Main above is the execution of a filter clause located inside Main (in this case, "When Positive()"). When each filter clause completes, we "return" back to Thrower() to continue our search for the filter that returns True. Since this is how the call stack is built up, that's what DoStackSnapshot will report. diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md index d5ddb5dc777c8..856290fd99311 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Generics and Your Profiler.md @@ -57,8 +57,6 @@ HRESULT GetFunctionInfo2([in] FunctionID funcId, typeArgs[]: This is the array of **type arguments** to MyClass\.Foo\. So this will be an array of only one element: the ClassID for float. (The int in MyClass\ is a type argument to MyClass, not to Foo, and you would only see that when you call GetClassIDInfo2 with MyClass\.) -## - ## GetClassIDInfo2 OK, someone in parentheses said something about calling GetClassIDInfo2, so let’s do that. Since we got the ClassID for MyClass\ above, let’s pass it to GetClassIDInfo2 to see what we get: @@ -102,8 +100,6 @@ With a valid COR\_PRF\_FRAME\_INFO, GetFunctionInfo2 will give you helpful, spec It’s worth noting here that there is a bug in GetFunctionInfo2, in that the [out] pClassId you get for the class containing the function can be wrong with generic virtual functions. Take a look at [this forum post](http://social.msdn.microsoft.com/Forums/en-US/netfxtoolsdev/thread/ed6f972f-712a-48df-8cce-74f8951503fa/) for more information and a workaround. -## - ## ClassIDs & FunctionIDs vs. Metadata Tokens Although you can infer this from the above, let’s take a breather and review. When you have multiple generic instantiations of a generic type, that type is defined with one mdTypeDef (metadata token), but you’ll see multiple ClassIDs (one per instantiation). When you have multiple generic instantiations of a generic method, it’s defined with one mdMethodDef (metadata token), but you’ll see multiple FunctionIDs (one per instantiation). 
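Tying GetFunctionInfo2 and GetClassIDInfo2 together, below is a hedged sketch of how a profiler callback that has a COR\_PRF\_FRAME\_INFO in hand might recover a full generic instantiation. `InspectInstantiation` and the fixed-size eight-element buffers are illustrative choices, not part of the API:

```cpp
// Sketch: resolve a generic instantiation from a FunctionID plus frame info.
void InspectInstantiation(ICorProfilerInfo2 *pInfo2,
                          FunctionID funcId,
                          COR_PRF_FRAME_INFO frameInfo)
{
    ClassID  classId = 0;
    ModuleID moduleId = 0;
    mdToken  methodDef = mdTokenNil;
    ClassID  methodTypeArgs[8];
    ULONG32  cMethodTypeArgs = 0;

    // With a valid frameInfo, typeArgs receives the function's own type
    // arguments (the float in MyClass<int>.Foo<float>), and classId is the
    // exact containing instantiation (MyClass<int>).
    if (FAILED(pInfo2->GetFunctionInfo2(funcId, frameInfo, &classId, &moduleId,
                                        &methodDef, 8, &cMethodTypeArgs,
                                        methodTypeArgs)))
        return;

    if (classId != 0)   // can be 0 when shared code can't be resolved exactly
    {
        ModuleID  classModule = 0;
        mdTypeDef typeDef = mdTypeDefNil;
        ClassID   parentClassId = 0;
        ClassID   classTypeArgs[8];
        ULONG32   cClassTypeArgs = 0;

        // The class's own type arguments (the int in MyClass<int>) come back here.
        pInfo2->GetClassIDInfo2(classId, &classModule, &typeDef, &parentClassId,
                                8, &cClassTypeArgs, classTypeArgs);
    }
}
```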
diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md b/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md index fded71588a335..283194e860ddc 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Metadata Tokens, Run-Time IDs, and Type Loading.md @@ -31,10 +31,6 @@ Yes, that is a good example. You are an astute reader. Memory profilers that w # Going from metadata token to run-time ID -# - -# - As I mentioned above, the safest way to do this is to build up your own map and do reverse-lookups as necessary. If that scheme meets your needs, then by all means do that, and stop reading! But in the cases where this is insufficient, you may need to resort to using GetFunctionFromToken(AndTypeArgs) and GetClassFromToken(AndTypeArgs). There is no simple, foolproof way to use these APIs safely, but here is your guideline: **Never call GetFunctionFromToken(AndTypeArgs) and GetClassFromToken(AndTypeArgs) unless you’re certain the relevant types have been loaded.** (“Relevant types” include the ClassID containing the FunctionID whose mdMethodDef you pass to GetFunctionFromToken(AndTypeArgs), and the ClassID whose mdTypeDef you pass to GetClassFromToken(AndTypeArgs).) If these types have not been loaded, _you may cause them to be loaded now_! This is bad because: diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md b/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md index 440931e7dca13..544e0da32d0f4 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ReJIT - The Basics.md @@ -22,12 +22,6 @@ Typically, your profiler will also create a new thread at this point, call it yo ## ModuleLoadFinished Time -### - -### - -### - ### Metadata Changes As each module loads, you will likely need to add metadata so that your future ReJITs will have the tokens they need. What you do here heavily depends on the kind of instrumentation you want to do. I’m assuming you’re doing instrumentation that adds some calls from the user code into brand new profiler helper methods you will add somewhere. If you plan to instrument mscorlib, you will likely want to add those profiler helper methods into mscorlib (remember, mscorlib is not allowed to contain an AssemblyRef that points to any other assembly!). Otherwise, perhaps you plan to ship a managed helper assembly that will sit on your user’s disk, and all your profiler helper methods will reside in this on-disk managed helper assembly. @@ -56,10 +50,6 @@ Now imagine your user has turned some dial on your out-of-process GUI, to reques - This is optional, and only need be done if you truly want this ReJIT request to apply to all unshared copies of the function. You’re perfectly welcome to ReJIT only those unshared copies you want (and / or the shared copy). - Now you can re-read the “Re-Request Prior ReJITs” section above. :-) -## - -### - ### More on AppDomains This whole shared / multiple unshared business can get confusing. So to bring it home, consider your user. If your user expresses instrumentation intent at the level of a class/method name, then you pretty much want to ReJIT every copy of that function (all unshared copies plus the shared copy). 
But if your user expresses instrumentation intent at the level of a class/method name _plus AppDomain_ (think one single AppPool inside ASP.NET), then you’d only want to ReJIT the copy of the function that resides in the single ModuleID associated with that AppDomain. From ce39223c1267d6cd8251fcaaa760010654cc4ae8 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Sun, 16 Aug 2020 09:02:41 -0700 Subject: [PATCH 08/23] Update docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md Co-authored-by: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> --- .../Sample A Signature Blob Parser for your Profiler.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md index 52f5fd1b95593..2fa9d93b61617 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md @@ -11,9 +11,10 @@ PropertySig LocalVarSig Here are the files: -[sigparse.cpp](samples/sigparse.cpp) (Rico's signature parser) -[sigformat.cpp](samples/sigformat.cpp) (An example extension to the parser) -[PlugInToYourProfiler.cpp](samples/PlugInToYourProfiler.cpp) (Example code to plug the extension into your profiler) + +- [sigparse.cpp](samples/sigparse.cpp) (Rico's signature parser) +- [sigformat.cpp](samples/sigformat.cpp) (An example extension to the parser) +- [PlugInToYourProfiler.cpp](samples/PlugInToYourProfiler.cpp) (Example code to plug the extension into your profiler) Open up **sigparse.cpp** in your favorite editor and take a look at the grammar at the top. The grammar comes from the ECMA CLI spec. Jonathan Keljo has a [link](http://blogs.msdn.com/jkeljo/archive/2005/08/04/447726.aspx) to it from his blog. This tells you the types of signature blobs the parser can handle. @@ -60,4 +61,3 @@ Don't worry, it's optional. I mentioned above that only signatures whose grammar The only gotcha is that TypeSpecs & MethodSpecs don’t have a unique byte that introduces them. For example, GENERICINST could indicate the beginning of a TypeSpec or a MethodSpec. You’ll see that SigParser::Parse() switches on the intro byte to determine what it’s looking at. So to keep things simple, you’ll want to add a couple more top-level functions to SigParser to parse TypeSpecs & MethodSpecs (say, ParseTypeSpec() & ParseMethodSpec()). You’d then call those functions instead of Parse() when you have a TypeSpec or MethodSpec on your hands. Of course, if you don’t care about TypeSpecs and MethodSpecs, you can use the code as is and not worry. But this stuff is so much fun, you’ll probably want to add the capability anyway. Hope you find this useful. And thanks again to Rico Mariani for sigparse.cpp! 
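To make the "derive and override" pattern concrete, here is a minimal sketch of one such extension. The Notify\* name matches the sample's callback naming, but the `sig_count` parameter type and the `Parse` entry point are assumptions to be checked against the actual virtual signatures in sigparse.cpp:

```cpp
// Sketch: a SigParser extension that records each signature's param count.
#include "sigparse.cpp"   // the sample is self-contained, so include it directly

class ParamCountingParser : public SigParser
{
public:
    sig_count paramCount;

    ParamCountingParser() : paramCount(0) {}

protected:
    // Fired when the parser has decoded the parameter count of a method sig.
    virtual void NotifyParamCount(sig_count count)
    {
        paramCount = count;
    }
};

// Usage sketch: ParamCountingParser p; p.Parse(pSigBlob, cbSigBlob);
```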
- From 58ce98312dde44d6295e999331c5cc6d1d454439 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Sun, 16 Aug 2020 09:03:10 -0700 Subject: [PATCH 09/23] Update docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md Co-authored-by: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> --- ...mple A Signature Blob Parser for your Profiler.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md index 2fa9d93b61617..148aa404c63cb 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md @@ -3,12 +3,12 @@ If your profiler plays with metadata, you've undoubtedly come across signature blobs. They’re used to encode type information for method definitions & references, local variables, and a whole lot more. They’re wonderfully compact, recursively versatile, and sometimes, well, challenging to parse. Fortunately, [Rico Mariani](https://docs.microsoft.com/en-us/archive/blogs/ricom/) was feeling generous one day, and churned out a simple parser that can read these types of signatures: -MethodDefSig -MethodRefSig -StandAloneMethodSig -FieldSig -PropertySig -LocalVarSig +- MethodDefSig +- MethodRefSig +- StandAloneMethodSig +- FieldSig +- PropertySig +- LocalVarSig Here are the files: From 9fe64cfa2815d117b78a26138f0339c236ebe3b1 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:05:09 +0200 Subject: [PATCH 10/23] Manual fixes --- .../ELT Hooks - The Basics.md | 112 +++++++++--------- 1 file changed, 57 insertions(+), 55 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md index bcd246837a365..7acb138cbd1e7 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - The Basics.md @@ -5,79 +5,84 @@ The CLR Profiling API allows you to hook managed functions so that your profiler ### Setting up the hooks -1. On initialization, your profiler must call SetEnterLeaveFunctionHooks(2) to specify which functions inside your profiler should be called whenever a managed function is entered, returns, or exits via tail call, respectively. - _(Profiler calls this…)_ +1. On initialization, your profiler must call SetEnterLeaveFunctionHooks(2) to specify which functions inside your profiler should be called whenever a managed function is entered, returns, or exits via tail call, respectively. 
+ + _(Profiler calls this…)_ + ``` HRESULT SetEnterLeaveFunctionHooks( - [in] FunctionEnter \*pFuncEnter, - [in] FunctionLeave \*pFuncLeave, - [in] FunctionTailcall \*pFuncTailcall); + [in] FunctionEnter *pFuncEnter, + [in] FunctionLeave *pFuncLeave, + [in] FunctionTailcall *pFuncTailcall); ``` - _(Profiler implements these…)_ - ``` - typedef void FunctionEnter(FunctionID funcID); - typedef void FunctionLeave(FunctionID funcID); - typedef void FunctionTailcall(FunctionID funcID); - ``` + _(Profiler implements these…)_ - **OR** + ``` + typedef void FunctionEnter(FunctionID funcID); + typedef void FunctionLeave(FunctionID funcID); + typedef void FunctionTailcall(FunctionID funcID); + ``` - _(Profiler calls this…)_ - ``` - HRESULT SetEnterLeaveFunctionHooks2( - [in] FunctionEnter2 *pFuncEnter, - [in] FunctionLeave2 *pFuncLeave, - [in] FunctionTailcall2 *pFuncTailcall); - ``` + **OR** + _(Profiler calls this…)_ - _(Profiler implements these…)_ - ``` - typedef void FunctionEnter2( - FunctionID funcId, - UINT_PTR clientData, - COR_PRF_FRAME_INFO func, - COR_PRF_FUNCTION_ARGUMENT_INFO *argumentInfo); - - typedef void FunctionLeave2( - FunctionID funcId, - UINT_PTR clientData, - COR_PRF_FRAME_INFO func, - COR_PRF_FUNCTION_ARGUMENT_RANGE *retvalRange); - - typedef void FunctionTailcall2( - FunctionID funcId, - UINT_PTR clientData, - COR_PRF_FRAME_INFO func); - ``` + ``` + HRESULT SetEnterLeaveFunctionHooks2( + [in] FunctionEnter2 *pFuncEnter, + [in] FunctionLeave2 *pFuncLeave, + [in] FunctionTailcall2 *pFuncTailcall); + ``` - This step alone does not cause the enter/leave/tailcall (ELT) hooks to be called. But you must do this on startup to get things rolling. + _(Profiler implements these…)_ -2. At any time during the run, your profiler calls SetEventMask specifying COR\_PRF\_MONITOR\_ENTERLEAVE in the bitmask. Your profiler may set or reset this flag at any time to cause ELT hooks to be called or ignored, respectively. + ``` + typedef void FunctionEnter2( + FunctionID funcId, + UINT_PTR clientData, + COR_PRF_FRAME_INFO func, + COR_PRF_FUNCTION_ARGUMENT_INFO *argumentInfo); + + typedef void FunctionLeave2( + FunctionID funcId, + UINT_PTR clientData, + COR_PRF_FRAME_INFO func, + COR_PRF_FUNCTION_ARGUMENT_RANGE *retvalRange); + + typedef void FunctionTailcall2( + FunctionID funcId, + UINT_PTR clientData, + COR_PRF_FRAME_INFO func); + ``` + + This step alone does not cause the enter/leave/tailcall (ELT) hooks to be called. But you must do this on startup to get things rolling. + +2. At any time during the run, your profiler calls SetEventMask specifying COR\_PRF\_MONITOR\_ENTERLEAVE in the bitmask. Your profiler may set or reset this flag at any time to cause ELT hooks to be called or ignored, respectively. ### FunctionIDMapper In addition to the above two steps, your profiler may specify more granularly which managed functions should have ELT hooks compiled into them: -1. At any time, your profiler may call ICorProfilerInfo2::SetFunctionIDMapper to specify a special hook to be called when a function is JITted. +1. At any time, your profiler may call ICorProfilerInfo2::SetFunctionIDMapper to specify a special hook to be called when a function is JITted. 
-_(Profiler calls this…)_ -``` - HRESULT SetFunctionIDMapper([in] FunctionIDMapper \*pFunc); -``` + _(Profiler calls this…)_ + ``` + HRESULT SetFunctionIDMapper([in] FunctionIDMapper \*pFunc); + ``` - _(Profiler implements this…)_ -``` -typedef UINT_PTR __stdcall FunctionIDMapper( - FunctionID funcId, - BOOL *pbHookFunction); -``` + _(Profiler implements this…)_ + + ``` + typedef UINT_PTR __stdcall FunctionIDMapper( + FunctionID funcId, + BOOL *pbHookFunction); + ``` 2. When FunctionIDMapper is called: - a. Your profiler sets the pbHookFunction [out] parameter appropriately to determine whether the function identified by funcId should have ELT hooks compiled into it. + a. Your profiler sets the pbHookFunction \[out] parameter appropriately to determine whether the function identified by funcId should have ELT hooks compiled into it. b. Of course, the primary purpose of FunctionIDMapper is to allow your profiler to specify an alternate ID for that function. Your profiler does this by returning that ID from FunctionIDMapper . The CLR will pass this alternate ID to your ELT hooks (as funcID if you're using the 1.x ELT, and as clientData if you're using the 2.x ELT). ### Writing your ELT hooks @@ -92,8 +97,6 @@ The solution is “NGEN /Profile”. For example, if you run this command agains `ngen install MyAssembly.dll /Profile` - - it will NGEN MyAssembly.dll with the “Profile” flavor (also called “profiler-enhanced”). This flavor causes extra hooks to be baked in to enable features like ELT hooks, loader callbacks, managed/unmanaged code transition callbacks, and the JITCachedFunctionSearchStarted/Finished callbacks. The original NGENd versions of all your assemblies still stay around in your NGEN cache. NGEN /Profile simply causes a new set of NGENd assemblies to be generated as well, marked as the “profiler-enhanced” set of NGENd assemblies. At run-time, the CLR determines which flavor should be loaded. If a profiler is attached and enables certain features that only work with profiler-enhanced (not regular) NGENd assemblies (such as ELT via a call to SetEnterLeaveFunctionHooks(2), or any of several other features that are requested by setting particular event flags via SetEventMask), then the CLR will only load profiler-enhanced NGENd images--and if none exist then the CLR degrades to JIT in order to support the features requested by the profiler. In contrast, if the profiler does not specify such event flags, or there is no profiler to begin with, then the CLR loads the regular-flavored NGENd assemblies. @@ -128,4 +131,3 @@ Why do you care? Well, it's always good to know what price you're paying. If you ### Next time... That about covers it for the ELT basics. Next installment of this riveting series will talk about that enigma known as tailcall. 
- From a2ad234a17875eedfeb000af07ec761a02cdba1c Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:15:48 +0200 Subject: [PATCH 11/23] Needs manual review, the list numbering is really strange --- ...rofiler stack walking Basics and beyond.md | 110 ++++++------------ 1 file changed, 35 insertions(+), 75 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md index 21ea9dcf77009..396dc86ab7e1d 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md @@ -27,7 +27,7 @@ HRESULT DoStackSnapshot( ``` And here’s what the CLR calls on your profiler (you can also find this in corprof.idl). You’ll pass a pointer to your implementation of this function in the callback parameter above. ``` -typedef HRESULT \_\_stdcall StackSnapshotCallback( +typedef HRESULT __stdcall StackSnapshotCallback( FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo, @@ -77,46 +77,16 @@ Before I continue from this exciting cliffhanger, a brief interlude. Everyone k Now that we’re speaking the same language. Let’s look at a mixed-mode stack: -| - +``` Unmanaged - - | -| - D (Managed) - - | -| - Unmanaged - - | -| - C (Managed) - - | -| - B (Managed) - - | -| - Unmanaged - - | -| - A (Managed) - - | -| - Main (Managed) - - | +``` Stepping back a bit, it’s worthwhile to understand why DoStackSnapshot exists in the first place. It’s there to help you walk _managed_ frames on the stack. If you tried to walk managed frames yourself, you would get unreliable results, particularly on 32 bits, because of some wacky calling conventions used in managed code. The CLR understands these calling conventions, and DoStackSnapshot is therefore in a uniquely suitable position to help you decode them. However, DoStackSnapshot is not a complete solution if you want to be able to walk the entire stack, including unmanaged frames. Here’s where you have a choice: @@ -145,81 +115,71 @@ But before you get too deep, note that the issue of whether and how to seed a st For the truly adventurous profiler that is doing an asynchronous, cross-thread, seeded stack walk while filling in the unmanaged holes, here’s what it would look like. -| - +``` Block of Unmanaged Frames +``` - | 1. You suspend the target thread (target thread’s suspend count is now 1) 2. You get the target thread’s current register context 3. You determine if the register context points to unmanaged code (e.g., call ICorProfilerInfo2::GetFunctionFromIP(), and see if you get back a 0 FunctionID) 4. In this case the register context does point to unmanaged code, so you perform an unmanaged stack walk until you find the top-most managed frame (D) - | -| -Function D -(Managed) + ``` + Function D + (Managed) + ``` - | 1. You call DoStackSnapshot with your seed context. CLR suspends target thread again: its suspend count is now 2. Our sandwich begins. 1. CLR calls your StackSnapshotCallback with FunctionID for D. - | -| -Block of -Unmanaged -Frames + ``` + Block of + Unmanaged + Frames + ``` - | 1. CLR calls your StackSnapshotCallback with FunctionID=0. You’ll need to walk this block yourself. 
You can stop when you hit the first managed frame, or you can cheat: delay your unmanaged walk until sometime after your next callback, as the next callback will tell you exactly where the next managed frame begins (and thus where your unmanaged walk should end). - | -| - -Function C -(Managed) - | + ``` + Function C + (Managed) + ``` 1. CLR calls your StackSnapshotCallback with FunctionID for C. - | -| -Function B -(Managed) + ``` + Function B + (Managed) + ``` - | 1. CLR calls your StackSnapshotCallback with FunctionID for B. - | -| -Block of -Unmanaged -Frames + ``` + Block of + Unmanaged + Frames + ``` - | 1. CLR calls your StackSnapshotCallback with FunctionID=0. Again, you’ll need to walk this block yourself. - | -| -Function A -(Managed) + ``` + Function A + (Managed) + ``` - | 1. CLR calls your StackSnapshotCallback with FunctionID for A. - | -| -Main -(Managed) + ``` + Main + (Managed) + ``` - | 1. CLR calls your StackSnapshotCallback with FunctionID for Main. 2. DoStackSnapshot “resumes” target thread (its suspend count is now 1) and returns. Our sandwich is complete. 1. You resume target thread (its suspend count is now 0, so it’s resumed for real). - | **Triumph over evil** From 8954ad4478ec48aa7e7c41a40b168f62d4343244 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:18:02 +0200 Subject: [PATCH 12/23] Fix --- .../Profiler stack walking Basics and beyond.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md index 396dc86ab7e1d..935138b64f909 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Profiler stack walking Basics and beyond.md @@ -115,11 +115,7 @@ But before you get too deep, note that the issue of whether and how to seed a st For the truly adventurous profiler that is doing an asynchronous, cross-thread, seeded stack walk while filling in the unmanaged holes, here’s what it would look like. -``` -Block of -Unmanaged -Frames -``` +Block of Unmanaged Frames 1. You suspend the target thread (target thread’s suspend count is now 1) 2. You get the target thread’s current register context From f45553802186b32f82cb5b5ddef836529c6284cf Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:19:17 +0200 Subject: [PATCH 13/23] Code fence --- .../Sample A Signature Blob Parser for your Profiler.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md index 148aa404c63cb..88ab78844b3da 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Sample A Signature Blob Parser for your Profiler.md @@ -27,6 +27,7 @@ Sigparse.cpp is structured without any dependencies on any headers, so you can e Simply derive a new class from SigParser, and override the virtual functions. The functions you override are events to be handled as the parser traverses the signature in top-down fashion. 
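To make that concrete, here is a minimal sketch of such a subclass; the notification sequence it reacts to is illustrated just below. The `sig_elem_type` and `sig_count` parameter types are assumed to match the typedefs in sigparse.cpp, so check your copy for the exact virtual signatures before compiling.

```
#include <stdio.h>
#include "sigparse.cpp"   // the sample is dependency-free, so direct inclusion works

// Sketch: a parser subclass that just traces a couple of the notifications.
class TracingSigParser : public SigParser
{
protected:
    virtual void NotifyBeginMethod(sig_elem_type elemType)
    {
        printf("begin method\n");
    }

    virtual void NotifyParamCount(sig_count count)
    {
        printf("  %u parameter(s)\n", (unsigned)count);
    }

    virtual void NotifyEndMethod()
    {
        printf("end method\n");
    }
};
```

Feeding it a signature blob you fetched from the metadata (e.g., via `IMetaDataImport::GetMethodProps`) would then look something like `TracingSigParser parser; parser.Parse(pSigBlob, cbSigBlob);`, again assuming the entry point declared in sigparse.cpp.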
For example, when the parser encounters a MethodDef, you might see calls to your overrides of: +``` NotifyBeginMethod() NotifyParamCount() NotifyBeginRetType() @@ -41,6 +42,7 @@ NotifyBeginMethod() NotifyEndParam() _… (more parameter notifications occur here if more parameters exist)_ NotifyEndMethod() +``` And yes, generics are handled as well. From 28c867220dee7bc2b72d62680b0b929f0ba3f2b1 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:22:35 +0200 Subject: [PATCH 14/23] Deleted extra backslashes and vertical bars, fixed indentation, please review this --- .../davbr-blog-archive/Type Forwarding.md | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md b/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md index 0be1cb40932e2..aae3f7d874025 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Type Forwarding.md @@ -9,14 +9,12 @@ Type forwarding is nothing new. However, in CLR V4, we are enabling type forwar The example I’ll use where the .NET Framework uses type forwarding is the TimeZoneInfo class. In CLR V4, TimeZoneInfo is now forwarded from System.Core.dll to mscorlib.dll. If you open the CLR V4 copy of System.Core.dll in ildasm and choose Dump, you'll see the following: -| ``` .class extern /*27000004*/ forwarder System.TimeZoneInfo { .assembly extern mscorlib /*23000001*/ } ``` - | In each assembly’s metadata is an exported types table. The above means that System.Core.dll's exported types table includes an entry for System.TimeZoneInfo (indexed by token 27000004). What's significant is that System.Core.dll no longer has a typeDef for System.TimeZoneInfo, only an exported type. The fact that the token begins at the left with 0x27 tells you that it's an mdtExportedType (not a mdtTypeDef, which begins at the left with 0x02). @@ -70,11 +68,11 @@ Note that, if you were to build the above C# code using the .NET 4.0 C# compiler Ok, so how do we run this pre-.NET 4.0 executable against .NET 4.0? A config file, of course. Paste the following into a file named Class1.exe.config that sits next to Class1.exe: ``` - - - - - + + + + + ``` The above will force Class1.exe to bind against .NET 4.0 Beta 1. And when it comes time to look for TimeZoneInfo, the CLR will first look in System.Core.dll, find the exported types table entry, and then hop over to mscorlib.dll to load the type. What does that look like to your profiler? Make your guess and hold that thought. First, another walkthrough… @@ -188,7 +186,6 @@ And this all despite the fact that MyClient.exe still believes that Foo lives in IL\_001c: ret } // end of method Test::Main ``` - | ## Profilers @@ -199,5 +196,3 @@ This should make life easy for profilers, since they generally expect to be able However, type forwarding is important to understand if your profiler needs to follow metadata references directly. More generally, if your profiler is reading through metadata and expects to come across a typeDef (e.g., perhaps a metadata reference points to a type in that module, or perhaps your profiler expects certain known types to be in certain modules), then your profiler should be prepared to find an mdtExportedType instead, and to deal gracefully with it rather than doing something silly like crashing. 
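If your profiler does that kind of direct metadata inspection, the check is mechanical. Here's a sketch (error handling compressed, and `FindPossiblyForwardedType` is just an illustrative name) of looking up a type that may have been forwarded out of the module you're inspecting:

```
#include <cor.h>

// Sketch: find wszType in this module, tolerating type forwarding.
HRESULT FindPossiblyForwardedType(IMetaDataImport *pImport, LPCWSTR wszType)
{
    mdTypeDef td;
    HRESULT hr = pImport->FindTypeDefByName(wszType, mdTokenNil, &td);
    if (SUCCEEDED(hr))
        return S_OK;                       // ordinary typeDef; it really lives here

    // No typeDef: consult the exported types table instead.
    IMetaDataAssemblyImport *pAsmImport = NULL;
    hr = pImport->QueryInterface(IID_IMetaDataAssemblyImport, (void **)&pAsmImport);
    if (FAILED(hr))
        return hr;

    mdExportedType tkExportedType;
    hr = pAsmImport->FindExportedTypeByName(wszType, mdExportedTypeNil, &tkExportedType);
    if (SUCCEEDED(hr))
    {
        mdToken   tkImplementation;        // an mdAssemblyRef for a forwarded type
        mdTypeDef tdDummy;
        DWORD     dwFlags;
        hr = pAsmImport->GetExportedTypeProps(tkExportedType, NULL, 0, NULL,
                                              &tkImplementation, &tdDummy, &dwFlags);
        // If (dwFlags & tdForwarder), follow tkImplementation to the assembly the
        // type was forwarded to, rather than assuming the type lives in this module.
    }

    pAsmImport->Release();
    return hr;
}
```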
In any case, whether you think your profiler will be affected by type forwarding, be sure to test, test, test!
-
-

From 12e09581f41447dcdfbada6be67d6c4ea6e99e5f Mon Sep 17 00:00:00 2001
From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com>
Date: Sun, 16 Aug 2020 20:24:21 +0200
Subject: [PATCH 15/23] Make rendered view match the raw md

---
 docs/design/features/additional-deps.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/docs/design/features/additional-deps.md b/docs/design/features/additional-deps.md
index aee11a4b85903..9ce6cb4437642 100644
--- a/docs/design/features/additional-deps.md
+++ b/docs/design/features/additional-deps.md
@@ -52,9 +52,7 @@ The proposal for this is to "roll-backwards" starting with the "found" version.

 #### Roll-forward uses app's TFM

-A secondary issue with with the store's naming convention for framework. It contains a path such as:
- `\dotnet\store\x64\netcoreapp2.0\microsoft.applicationinsights\2.4.0`
-where 'netcoreapp2.0' is a "tfm" (target framework moniker). During roll-forward cases, the tfm is still the value specified in the app's runtimeconfig. The host only includes store folders that match that tfm, so it may not find packages from other deps files that were generated off a different tfm. In addition, with the advent of multiple frameworks, it makes it cumbersome to be forced to install to every tfm because multiple frameworks may use the same package, and because each package is still identified by an exact version.
+A secondary issue is with the store's naming convention for frameworks. It contains a path such as: `\dotnet\store\x64\netcoreapp2.0\microsoft.applicationinsights\2.4.0` where 'netcoreapp2.0' is a "tfm" (target framework moniker). During roll-forward cases, the tfm is still the value specified in the app's runtimeconfig. The host only includes store folders that match that tfm, so it may not find packages from other deps files that were generated off a different tfm. In addition, with the advent of multiple frameworks, it makes it cumbersome to be forced to install to every tfm because multiple frameworks may use the same package, and because each package is still identified by an exact version.

 The proposal for this is to add an "any" tfm.

From 92476820923efc20c3e97f39467b322254560281 Mon Sep 17 00:00:00 2001
From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com>
Date: Sun, 16 Aug 2020 20:26:13 +0200
Subject: [PATCH 16/23] Make rendered view match the raw md

---
 docs/design/features/additional-deps.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/design/features/additional-deps.md b/docs/design/features/additional-deps.md
index 9ce6cb4437642..2bc3e59492c82 100644
--- a/docs/design/features/additional-deps.md
+++ b/docs/design/features/additional-deps.md
@@ -79,8 +79,7 @@ Where "found" means the version that is being used at run time including roll-fo

 For example, `\dotnet\store\x64\any\microsoft.applicationinsights\2.4.0`

-The `any` tfm would be used if the specified tfm (e.g. netcoreapp2.0) is not found:
- `\dotnet\store\x64\netcoreapp2.0\microsoft.applicationinsights\2.4.0`
+The `any` tfm would be used if the specified tfm (e.g. netcoreapp2.0) is not found: `\dotnet\store\x64\netcoreapp2.0\microsoft.applicationinsights\2.4.0`

 _Possible risk: doesn't this make "uninstall" more difficult?
Because multiple installs may write the same packages and try to remove packages that another installer created?_ From ae86624d6736a56ee01b4cd6806e8f22e67be7eb Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:29:02 +0200 Subject: [PATCH 17/23] Match raw with rendered --- docs/design/features/framework-version-resolution.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/design/features/framework-version-resolution.md b/docs/design/features/framework-version-resolution.md index 179db366aa41c..def26f1c0671a 100644 --- a/docs/design/features/framework-version-resolution.md +++ b/docs/design/features/framework-version-resolution.md @@ -30,8 +30,7 @@ In the `.runtimeconfig.json` these values are defined like this: ``` #### Framework name -Each framework reference identifies the framework by its name. -Framework names are case sensitive (since they're used as folder names even on Linux systems). +Each framework reference identifies the framework by its name. Framework names are case sensitive (since they're used as folder names even on Linux systems). #### Version Framework version must be a [SemVer V2](https://semver.org) valid version. From 6d38710bd42243fb5b2f3b92f0e8de784c3d2578 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:31:12 +0200 Subject: [PATCH 18/23] Match raw with rendered --- docs/design/features/host-error-codes.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/design/features/host-error-codes.md b/docs/design/features/host-error-codes.md index 9e246409011fa..21ea9c8bc7114 100644 --- a/docs/design/features/host-error-codes.md +++ b/docs/design/features/host-error-codes.md @@ -7,11 +7,9 @@ Note that the exit code returned by running an application via `dotnet.exe` or ` * `Success` (`0`) - Operation was successful. -* `Success_HostAlreadyInitialized` (`0x00000001`) - Initialization was successful, but another host context is already initialized, so the returned context is "secondary". The requested context was otherwise fully compatible with the already initialized context. -This is returned by `hostfxr_initialize_for_runtime_config` if it's called when the host is already initialized in the process. Comes from `corehost_initialize` in `hostpolicy`. +* `Success_HostAlreadyInitialized` (`0x00000001`) - Initialization was successful, but another host context is already initialized, so the returned context is "secondary". The requested context was otherwise fully compatible with the already initialized context. This is returned by `hostfxr_initialize_for_runtime_config` if it's called when the host is already initialized in the process. Comes from `corehost_initialize` in `hostpolicy`. -* `Success_DifferentRuntimeProperties` (`0x00000002`) - Initialization was successful, but another host context is already initialized and the requested context specified some runtime properties which are not the same (either in value or in presence) to the already initialized context. -This is returned by `hostfxr_initialize_for_runtime_config` if it's called when the host is already initialized in the process. Comes from `corehost_initialize` in `hostpolicy`. 
+* `Success_DifferentRuntimeProperties` (`0x00000002`) - Initialization was successful, but another host context is already initialized and the requested context specified some runtime properties which are not the same (either in value or in presence) to the already initialized context. This is returned by `hostfxr_initialize_for_runtime_config` if it's called when the host is already initialized in the process. Comes from `corehost_initialize` in `hostpolicy`. ### Failure error/exit codes From 7d4f6e91403b9a766f7422f677aae466605b6343 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:38:13 +0200 Subject: [PATCH 19/23] Delete extra asterisks and | --- .../profiling/davbr-blog-archive/Debugging - Activation.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md index 9cda190345082..4afd8111db88d 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - Activation.md @@ -15,9 +15,8 @@ Environment variables --\> Registry --\> Profiler DLL on File system. The first link in this chain is to check the environment variables inside the process that should be profiled. If you're running the process from a command-prompt, you can just try a "set co" from the command prompt: -| ``` -**C:\>** set co +C:\> set co (blah blah, other vars beginning with "co") ``` @@ -25,7 +24,6 @@ The first link in this chain is to check the environment variables inside the pr Cor_Enable_Profiling=0x1 COR_PROFILER={C5F90153-B93E-4138-9DB7-EB7156B07C4C} ``` - | If your scenario doesn't allow you to just run the process from a command prompt, like say an asp.net scenario, you may want to attach a debugger to the process that's supposed to be profiled, or use IFEO (HKEY\_LOCAL\_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options) to force a debugger to start when the worker process starts. In the debugger, you can then use "!peb" to view the environment block, which will include the environment variables. @@ -62,4 +60,3 @@ or even set a breakpoint inside your Profiler DLL's **DllMain.** Now go, and s If you're still going strong, set a breakpoint in your profiler's **Initialize** () callback. Failures here are actually a popular cause for activation problems. Inside your Initialize() callback, your profiler is likely calling QueryInterface for the ICorProfilerInfoX interface of your choice, and then calling SetEventMask, and doing other initialization-related tasks, like calling SetEnterLeaveFunctionHooks(2). Do any of these fail? Is your Initialize() callback returning a failure HRESULT? Hopefully by now you've isolated the failure point. If not, and your Initialize() is happily returning S\_OK, then your profiler is apparently loading just fine. At least it is when you're debugging it. 
:-) - From 6c22799bbee5b96e3b92cff59d285b898b6fee1b Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:40:12 +0200 Subject: [PATCH 20/23] More cleanup (asterisks and |) --- .../Debugging - SOS and IDs.md | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md index 6f3da89e5c5ef..7616327f87735 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md @@ -9,9 +9,8 @@ SOS.DLL is a debugger extension DLL that ships with the CLR. You'll find it sit In windbg, you'll need mscorwks.dll to load first, and then you can load SOS. Often, I don't need SOS until well into my debugging session, at which point mscorwks.dll has already been loaded anyway. However, there are some cases where you'd like SOS loaded at the first possible moment, so you can use some of its commands early (like !bpmd to set a breakpoint on a managed method). So a surefire way to get SOS loaded ASAP is to have the debugger break when mscorwks gets loaded (e.g., "sxe ld mscorwks"). Once mscorwks is loaded, you can load SOS using the .loadby command: -| ``` -0:000\> **sxe ld mscorwks** +0:000\> sxe ld mscorwks 0:000\> g ModLoad: 79e70000 7a3ff000 C:\Windows\Microsoft.NET\Framework\v2.0.50727\mscorwks.dll eax=00000000 ebx=00000000 ecx=00000000 edx=00000000 esi=7efdd000 edi=20000000 @@ -19,9 +18,8 @@ In windbg, you'll need mscorwks.dll to load first, and then you can load SOS. O cs=0023 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00000202 ntdll!NtMapViewOfSection+0x12: 77a1a9fa c22800 ret 28h - 0:000\> **.loadby sos mscorwks** + 0:000\> .loadby sos mscorwks ``` - | With SOS loaded, you can now use its commands to inspect the various IDs that the profiling API passes to your profiler. @@ -35,7 +33,6 @@ As far as your profiler is concerned, a FunctionID is just an opaque number. It Ok, so FunctionID = (MethodDesc \*). How does that help you? SOS just so happens to have a command to inspect MethodDescs: !dumpmd. So if you're in a debugger looking at your profiler code that's operating on a FunctionID, it can beneficial to you to find out which function that FunctionID actually refers to. In the example below, the debugger will break in my proifler's JITCompilationStarted callback and look at the FunctionID. It's assumed that you've already loaded SOS as per above. -| ``` 0:000\> bu UnitTestSampleProfiler!SampleCallbackImpl::JITCompilationStarted 0:000\> g @@ -50,36 +47,30 @@ Breakpoint 0 hit UnitTestSampleProfiler!SampleCallbackImpl::JITCompilationStarted: 10003fc0 55 push ebp ``` - | The debugger is now sitting at the beginning of my profiler's JITCompilationStarted callback. Let's take a look at the parameters. -| ``` 0:000\> dv this = 0x00c133f8 - **functionID = 0x1e3170** + functionID = 0x1e3170 fIsSafeToBlock = 1 ``` - | Aha, that's the FunctionID about to get JITted. Now use SOS to see what that function really is. -| ``` 0:000\> !dumpmd 0x1e3170 Method Name: test.Class1.Main(System.String[]) Class: 001e1288 -**MethodTable: 001e3180** mdToken: 06000001 +MethodTable: 001e3180 mdToken: 06000001 Module: 001e2d8c IsJitted: no m\_CodeOrIL: ffffffff ``` - | Lots of juicy info here, though the Method Name typically is what helps me the most in my debugging sessions. 
mdToken tells us the metadata token for this method. MethodTable tells us where another internal CLR data structure is stored that contains information about the class containing the function. In fact, the profiing API's ClassID is simply a MethodTable \*. [Note: the "Class: 001e1288" in the output above is very different from the MethodTable, and thus different from the profiling API's ClassID. Don't let the name fool you!] So we could go and inspect a bit further by dumping information about the MethodTable: -| ``` 0:000\> !dumpmt 0x001e3180 EEClass: 001e1288 @@ -91,7 +82,6 @@ Lots of juicy info here, though the Method Name typically is what helps me the m Number of IFaces in IFaceMap: 0 Slots in VTable: 6 ``` - | And of course, !dumpmt can be used anytime you come across a ClassID and want more info on it. @@ -126,11 +116,9 @@ It would probably be quicker to list what _isn't_ useful! I encourage you to do !bpmd lets you place a breakpoint on a managed method. Just specify the module name and the fully-qualified method name. For example: -| ``` !bpmd MyModule.exe MyNamespace.MyClass.Foo ``` - | If the method hasn't jitted yet, no worries. A "pending" breakpoint is placed. If your profiler performs IL rewriting, then using !bpmd on startup to set a managed breakpoint can be a handy way to break into the debugger just before your instrumented code will run (which, in turn, is typically just after your instrumented code has been jitted). This can help you in reproducing and diagnosing issues your profiler may run into when instrumenting particular functions (due to something interesting about the signature, generics, etc.). From dcda7314247868568bced9be00e95942637447e5 Mon Sep 17 00:00:00 2001 From: Youssef Victor <31348972+Youssef1313@users.noreply.github.com> Date: Sun, 16 Aug 2020 20:48:54 +0200 Subject: [PATCH 21/23] Improve readability (review this) --- docs/design/features/framework-version-resolution.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/design/features/framework-version-resolution.md b/docs/design/features/framework-version-resolution.md index def26f1c0671a..349d0f2501915 100644 --- a/docs/design/features/framework-version-resolution.md +++ b/docs/design/features/framework-version-resolution.md @@ -145,11 +145,13 @@ Pros Cons * Testing behavior of new releases with pre-release versions is not fully possible (see below). -* Some special cases don't work: -One special case which would not work: -*Component A which asks for `2.0.0 LatestMajor` is loaded first on a machine which has `3.0.0` and also `3.1.0-preview` installed. Because it's the first in the process it will resolve the runtime according to the above rules - that is prefer release version - and thus will select `3.0.0`. -Later on component B is loaded which asks for `3.1.0-preview LatestMajor` (for example the one in active development). This load will fail since `3.0.0` is not enough to run this component. -Loading the components in reverse order (B first and then A) will work since the `3.1.0-preview` runtime will be selected.* +* Some special cases don't work. + + One special case which would not work: + *Component A which asks for `2.0.0 LatestMajor` is loaded first on a machine which has `3.0.0` and also `3.1.0-preview` installed. 
Because it's the first in the process it will resolve the runtime according to the above rules - that is prefer release version - and thus will select `3.0.0`.* + + *Later on component B is loaded which asks for `3.1.0-preview LatestMajor` (for example the one in active development). This load will fail since `3.0.0` is not enough to run this component.* + *Loading the components in reverse order (B first and then A) will work since the `3.1.0-preview` runtime will be selected.* Modification to automatic roll forward to latest patch: Existing behavior is to find a matching framework based on the above rules and then apply roll forward to latest patch (except if `Disable` is specified). The new behavior should be: From 15ce76c816eb32f6db7c7a663cca26bc69cb9bd8 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Wed, 19 Aug 2020 17:19:59 -0700 Subject: [PATCH 22/23] Update ELT Hooks - tail calls.md --- .../profiling/davbr-blog-archive/ELT Hooks - tail calls.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md index d47fffeb084da..fcd96baec0a23 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md @@ -3,7 +3,7 @@ For most people the idea of entering or returning from a function seems straightforward. Your profiler's Enter hook is called at the beginning of a function, and its Leave hook is called just before the function returns. But the idea of a tail call and exactly what that means for the Profiling API is less straightforward. -In [Part 1](ELT Hooks - The Basics.md) I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. +In (Part 1)[ELT Hooks - The Basics.md] I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. ## Tail calling in general From 7970628959e73fd06d6d83e7edbac677624a2188 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Wed, 19 Aug 2020 17:24:40 -0700 Subject: [PATCH 23/23] Update ELT Hooks - tail calls.md --- .../profiling/davbr-blog-archive/ELT Hooks - tail calls.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md index fcd96baec0a23..d00ceba195e7c 100644 --- a/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md +++ b/docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md @@ -3,7 +3,7 @@ For most people the idea of entering or returning from a function seems straightforward. Your profiler's Enter hook is called at the beginning of a function, and its Leave hook is called just before the function returns. But the idea of a tail call and exactly what that means for the Profiling API is less straightforward. 
-In (Part 1)[ELT Hooks - The Basics.md] I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. +In [Part 1](ELT Hooks - The Basics.md) I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. ## Tail calling in general @@ -121,9 +121,11 @@ Method 2: On tailcall, "mark" the FunctionID at the top of your stack as needing With this strategy, for the duration of the call to Three(), the shadow stack will look like this: +``` Three Helper (marked for deferred pop) Main +``` which some might consider more user-friendly. And as soon as Three() returns, your profiler will sneakily do a double-pop leaving just this: @@ -163,9 +165,11 @@ Method 2: Shadow stack fails At stage (4), the shadow stack looks like this: +``` Helper Thread.Sleep (marked for "deferred pop") Main +``` If you think it might be complicated to explain tail calls to your users so they can understand the Method 1 form of shadow stack presentation, just try explaining why it makes sense to present to them that Thread.Sleep() is calling Helper()!
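For reference, the bookkeeping that Method 2 calls for boils down to something like the following sketch. It assumes one shadow stack per managed thread (thread-local storage and synchronization are omitted), and `OnEnter`/`OnTailcall`/`OnLeave` are hypothetical helpers you'd invoke from your real ELT hooks.

```
#include <corprof.h>
#include <vector>

// One frame on the shadow stack.
struct ShadowFrame
{
    FunctionID funcId;
    bool       markedForDeferredPop;   // set by the Tailcall hook
};

typedef std::vector<ShadowFrame> ShadowStack;

void OnEnter(ShadowStack &s, FunctionID funcId)
{
    ShadowFrame frame = { funcId, false };
    s.push_back(frame);
}

void OnTailcall(ShadowStack &s, FunctionID funcId)
{
    // The tail-calling function stays on the shadow stack for now, but is
    // marked so that it silently disappears at the next pop.
    if (!s.empty())
        s.back().markedForDeferredPop = true;
}

void OnLeave(ShadowStack &s, FunctionID funcId)
{
    if (!s.empty())
        s.pop_back();                  // the function that just returned

    // The "double pop": any marked tail-calling frames beneath it go too.
    while (!s.empty() && s.back().markedForDeferredPop)
        s.pop_back();
}
```

As the Thread.Sleep example shows, this keeps the shadow stack balanced, but the frames it displays between the tail call and the deferred pop can badly mislead your users.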