From 92026a894b471f3e5484b156ff83dcab16214e1d Mon Sep 17 00:00:00 2001 From: FabioPinheiro Date: Wed, 7 Aug 2024 19:12:51 +0100 Subject: [PATCH 1/9] feat: Move ADRs to this repo Signed-off-by: FabioPinheiro --- documentation/adrs/README.md | 55 ++ documentation/adrs/adr.md | 42 + ...markdown-architectural-decision-records.md | 55 ++ ...rary-as-a-dsl-for-openapi-specification.md | 216 ++++++ ...vate-keys-of-issuers-inside-prism-agent.md | 30 + ...ll-library-for-sql-statement-generation.md | 179 +++++ ...age-migrations-for-application-services.md | 73 ++ ...instead-of-scala2-to-write-applications.md | 73 ++ ...thin-applications-to-manage-conccurency.md | 73 ++ .../20230405-did-linked-resources.md | 733 ++++++++++++++++++ ...230509-message-routing-for-multi-tenant.md | 58 ++ .../20230515-mediator-message-storage.md | 81 ++ ...-deterministic-key-generation-algorithm.md | 122 +++ ...0230518-data-isolation-for-multitenancy.md | 194 +++++ ...-facilitate-multitenancy-in-cloud-agent.md | 138 ++++ ...d-secure-cryptography-management-module.md | 135 ++++ ...4-performance-framework-for-atala-prism.md | 165 ++++ ...service-for-managing-wallet-permissions.md | 162 ++++ ...vocation-status-list-expansion-strategy.md | 83 ++ ...103-use-jwt-claims-for-agent-admin-auth.md | 142 ++++ ...115-Error-handling-report-problem-agent.md | 168 ++++ ...se-zio-failures-and-defects-effectively.md | 524 +++++++++++++ ...-state-records-and-sending-via-webhooks.md | 83 ++ ...520-use-did-urls-to-reference-resources.md | 62 ++ documentation/adrs/sidebars.js | 61 ++ documentation/adrs/template.md | 78 ++ documentation/docs/sidebars.js | 1 - docusaurus.config.js | 16 +- src/pages/index.js | 2 +- 29 files changed, 3801 insertions(+), 3 deletions(-) create mode 100644 documentation/adrs/README.md create mode 100644 documentation/adrs/adr.md create mode 100644 documentation/adrs/decisions/20220919-use-markdown-architectural-decision-records.md create mode 100644 documentation/adrs/decisions/20221005-using-tapir-library-as-a-dsl-for-openapi-specification.md create mode 100644 documentation/adrs/decisions/20221006-store-private-keys-of-issuers-inside-prism-agent.md create mode 100644 documentation/adrs/decisions/20230118-quill-library-for-sql-statement-generation.md create mode 100644 documentation/adrs/decisions/20230206-use-flyway-to-manage-migrations-for-application-services.md create mode 100644 documentation/adrs/decisions/20230206-use-scala3-instead-of-scala2-to-write-applications.md create mode 100644 documentation/adrs/decisions/20230206-use-zio-as-a-functional-effect-system-within-applications-to-manage-conccurency.md create mode 100644 documentation/adrs/decisions/20230405-did-linked-resources.md create mode 100644 documentation/adrs/decisions/20230509-message-routing-for-multi-tenant.md create mode 100644 documentation/adrs/decisions/20230515-mediator-message-storage.md create mode 100644 documentation/adrs/decisions/20230516-hierarchical-deterministic-key-generation-algorithm.md create mode 100644 documentation/adrs/decisions/20230518-data-isolation-for-multitenancy.md create mode 100644 documentation/adrs/decisions/20230527-use-keycloak-and-jwt-tokens-for-authentication-and-authorisation-to-facilitate-multitenancy-in-cloud-agent.md create mode 100644 documentation/adrs/decisions/20230628-apollo-as-centralised-and-secure-cryptography-management-module.md create mode 100644 documentation/adrs/decisions/20230714-performance-framework-for-atala-prism.md create mode 100644 
                documentation/adrs/decisions/20230926-use-keycloak-authorisation-service-for-managing-wallet-permissions.md create mode 100644 documentation/adrs/decisions/20230928-revocation-status-list-expansion-strategy.md create mode 100644 documentation/adrs/decisions/20240103-use-jwt-claims-for-agent-admin-auth.md create mode 100644 documentation/adrs/decisions/20240115-Error-handling-report-problem-agent.md create mode 100644 documentation/adrs/decisions/20240116-use-zio-failures-and-defects-effectively.md create mode 100644 documentation/adrs/decisions/20240307-handle-errors-in-bg-jobs-by-storing-on-state-records-and-sending-via-webhooks.md create mode 100644 documentation/adrs/decisions/20240520-use-did-urls-to-reference-resources.md create mode 100644 documentation/adrs/sidebars.js create mode 100644 documentation/adrs/template.md
                diff --git a/documentation/adrs/README.md b/documentation/adrs/README.md
                new file mode 100644
                index 000000000..238ad8178
                --- /dev/null
                +++ b/documentation/adrs/README.md
                @@ -0,0 +1,55 @@
                +# Architecture Decision Records
                +
                +ADRs are automatically published to our Log4brains architecture knowledge base:
                +
                +**http://INSERT-YOUR-LOG4BRAINS-URL**
                +
                +Please use this link to browse them.
                +
                +## Development
                +
                +If not already done, install Log4brains:
                +
                +```bash
                +npm install -g log4brains
                +```
                +
                +To preview the knowledge base locally, run:
                +
                +```bash
                +log4brains preview
                +```
                +
                +In preview mode, the Hot Reload feature is enabled: any change you make to a markdown file is applied live in the UI.
                +
                +To create a new ADR interactively, run:
                +
                +```bash
                +log4brains adr new
                +```
                +
                +## Mermaid support
                +
                +Log4brains does not support [GitHub Mermaid diagrams](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-diagrams) by default.
                +
                +To successfully render Mermaid diagrams on the server side, add the following code to your ADR:
                +```
                +
                
+  ... your mermaid code here ...
+
                +
                +```
                +
                +> Unfortunately, this diagram won't be automatically rendered in your preview mode.
                +> So, you can debug using GitHub Mermaid diagrams and then integrate the code above into your ADR.
                +
                +## More information
                +
                +- [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records)
                +- [Engineering Guidance](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3599237263/Architectural+Decision+Records+ADRs)
                +- [Log4brains documentation](https://github.com/thomvaill/log4brains/tree/master#readme)
                +- [What is an ADR and why should you use them](https://github.com/thomvaill/log4brains/tree/master#-what-is-an-adr-and-why-should-you-use-them)
                +- [ADR GitHub organization](https://adr.github.io/)
                diff --git a/documentation/adrs/adr.md b/documentation/adrs/adr.md
                new file mode 100644
                index 000000000..0079f8401
                --- /dev/null
                +++ b/documentation/adrs/adr.md
                @@ -0,0 +1,42 @@
                +---
                +id: adr
                +title: Architecture knowledge base
                +---
                +
                +
                +# Architecture knowledge base
                +
                +Welcome 👋 to the architecture knowledge base of the Identus platform.
                +
                +Here you will find all the Architecture Decision Records (ADRs) of the project.
                +
                +The introduction of ADRs was approved in [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records).
                +
                +Engineering guidance on creating and managing ADRs can be found [here](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3599237263/Architectural+Decision+Records+ADRs).
                +
                +## Definition and purpose
                +
                +> An Architectural Decision (AD) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant.
                +> An Architectural Decision Record (ADR) captures a single AD, such as often done when writing personal notes or meeting minutes; the collection of ADRs created and maintained in a project constitutes its decision log.
                +
                +An ADR is immutable: only its status can change (i.e., become deprecated or superseded). That way, you can become familiar with the whole project history just by reading its decision log in chronological order.
                +Moreover, maintaining this documentation aims at:
                +
                +- 🚀 Improving and speeding up the onboarding of a new team member
                +- 🔭 Avoiding blind acceptance/reversal of a past decision (cf. [Michael Nygard's famous article on ADRs](https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions.html))
                +- 🤝 Formalizing the decision process of the team
                +
                +## Usage
                +
                +This website is automatically updated after a change on the `main` branch of the project's Git repository.
                +In fact, the developers manage this documentation directly with markdown files located next to their code, so it is more convenient for them to keep it up-to-date.
                +You can browse the ADRs by using the left menu or the search bar.
                
                +
                +## More information
                +
                +- [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records)
                +- [Engineering Guidance](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3599237263/Architectural+Decision+Records+ADRs)
                +- [Log4brains documentation](https://github.com/thomvaill/log4brains/tree/master#readme)
                +- [What is an ADR and why should you use them](https://github.com/thomvaill/log4brains/tree/master#-what-is-an-adr-and-why-should-you-use-them)
                +- [ADR GitHub organization](https://adr.github.io/)
                diff --git a/documentation/adrs/decisions/20220919-use-markdown-architectural-decision-records.md b/documentation/adrs/decisions/20220919-use-markdown-architectural-decision-records.md
                new file mode 100644
                index 000000000..f16dbd048
                --- /dev/null
                +++ b/documentation/adrs/decisions/20220919-use-markdown-architectural-decision-records.md
                @@ -0,0 +1,55 @@
                +# Use Markdown Architectural Decision Records
                +
                +- Status: accepted
                +- Date: 2022-09-19
                +- Tags: doc
                +
                +## Context and Problem Statement
                +
                +We want to record architectural decisions made in this project.
                +Which format and structure should these records follow?
                +
                +## Decision Drivers
                +
                +- We want to improve the information and technical documentation of our software engineering projects
                +- We want to create an immutable log of important architectural decisions we have made during the software development
                +- We recognise the need for a complement to RFCs, which typically document the process before a decision has been reached (and not after)
                +- We want this decision log to offer a standardised, lightweight, and extensible format to increase consistency across systems
                +- We want this decision log to live as close as possible to the relevant code-base
                +- We want this decision log to be easily readable, discoverable, and meaningfully searchable
                +
                +## Considered Options
                +
                +- [MADR](https://github.com/adr/madr/compare/3.0.0-beta...3.0.0-beta.2) 3.0.0-beta.2
                +- [MADR](https://adr.github.io/madr/) 2.1.2 with Log4brains patch
                +- [MADR](https://adr.github.io/madr/) 2.1.2 – The original Markdown Architectural Decision Records
                +- [Michael Nygard's template](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions) – The first incarnation of the term "ADR"
                +
                +## Decision Outcome
                +
                +Chosen option: "MADR 2.1.2 with Log4brains patch", because
                +
                +- The MADR format is lean and fits our development style.
                +- The MADR structure is comprehensible and facilitates usage & maintenance.
                +- The Log4brains patch adds more features, like tags.
                +- This format is compatible with Log4brains and allows us to run a portal with a timeline of ADRs.
                +
                +The "Log4brains patch" performs the following modifications to the original template:
                +
                +- Change the ADR filename format (`NNN-adr-name` becomes `YYYYMMDD-adr-name`), to avoid conflicts during Git merges.
                +- Add a `Tags` field.
                
                +
                +### Additional Information
                +
                +We will implement Architectural Decision Records (ADRs) with immediate effect:
                +
                +- ADRs are to be authored and published with (at minimum) 1 TA as decider;
                +- ADRs will be formatted using the MADR 2.1.2 with Log4brains patch format;
                +- ADRs are to be used to log system-wide decisions;
                +- Should the system consist of multiple code-repositories, ADRs should live in the main system repository;
                +- ADRs are to be stored in a subfolder docs/decisions/ of the repository for the software affected;
                +- ADRs will follow a flat filename convention with relevant components in their filename
                +
                +## Links
                +
                +- Relates to [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records)
                diff --git a/documentation/adrs/decisions/20221005-using-tapir-library-as-a-dsl-for-openapi-specification.md b/documentation/adrs/decisions/20221005-using-tapir-library-as-a-dsl-for-openapi-specification.md
                new file mode 100644
                index 000000000..55be5ef96
                --- /dev/null
                +++ b/documentation/adrs/decisions/20221005-using-tapir-library-as-a-dsl-for-openapi-specification.md
                @@ -0,0 +1,216 @@
                +# Using the Tapir library as a DSL for OpenAPI specification
                +
                +- Status: accepted
                +- Deciders: Yurii Shynbuiev, David Poltorak, Benjamin Voiturier, Ilya Peresadin, Bart Suichies
                +- Date: 2022-10-05
                +- Tags: OpenAPI, DSL, Tapir, code-generation, RESTAPI
                +
                +Related ADR/AIP: [Introduce REST HTTP for existing Node services](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3454500948/AIP+-+001)
                +
                +## Context and Problem Statement
                +The Identus Platform will contain a REST API. The decision was made by team consensus during the first AOH meeting to follow an "OpenAPI specification first" approach and generate stubs, server-side, and client-side code based on the OAS.
                +Following this strategy, we currently have 4-5 OAS files (Castor, Pollux, Mercury, Configuration).
                +
                +The following tool was selected for code generation: [OpenAPI Tools](https://github.com/OpenAPITools/openapi-generator)
                +
                +Instead of using the YAML file as the OpenAPI specification and openapi-generator for server and client stub generation, this ADR proposes to use the [Tapir](https://tapir.softwaremill.com/en/latest/index.html) Scala library as a DSL for the OpenAPI specification, `interpret` the endpoint definitions as Scala server and client stubs, generate the YAML file, and use openapi-generator for client stubs.
                +
                +The technology stack that is going to be used in the Identus platform backend is Scala 3 + the ZIO ecosystem.
                +
                +The Akka framework after version 2.6.x cannot be used because [Lightbend changed the license type to BSL 1.1](https://www.lightbend.com/blog/why-we-are-changing-the-license-for-akka).
                +
                +It looks like Akka 2.6.x can still be used according to the [License FAQ](https://www.lightbend.com/akka/license-faq).
                +
                +Currently, we have code generation for Akka that is wrapped up into ZIO. Code generation mustache templates for ZIO-http are not available in OpenAPI tools.
                +
                +The mustache templates and code generation don't work out of the box, so the original templates were copied to the project and fixed by @Shota and @Pat.
                +The current templates and generator contain constraints that were reported by [@Pat](https://docs.google.com/document/d/1WhUtflM_o-5uSx9LW76lycz2kbk071cVZiv6EtVwhAQ/edit#heading=h.ywcvgffenpz) and [@Shota](https://input-output-rnd.slack.com/archives/G018JE9NHAM/p1664563129397819); this requires engineering time to adapt the OAS for code generation. @Ben says that we can live with these constraints.
                
                +
                +Generally, OAS files are written by engineers with different experience and different views on formatting, schemas, normalization, and data types. For instance, the current templates don't have
                +- a consistent way of paginating the entities
                +- standard responses for 4xx and 5xx errors
                +- normalized data types (we use ```anyOf```, ```allOf```)
                +- a query parameter convention for filtering the entities
                +- some data types are duplicated in both Castor and Pollux OAS
                +
                +As the OAS specification evolves, it gets harder to manage because of the size of the file.
                +To mitigate this issue @Pat proposed to use well-known tools:
                +"Knowing that there are tools like [dhall](https://dhall-lang.org/#) or [CUE](https://cuelang.org/docs/integrations/openapi/) that allow us to write large configuration in yaml (or json) in a typesafe / reuseable way, I'm not hesitant to go contract-first."(c)
                +
                +The quality and formatting of autogenerated code depend on the template (not all templates are good enough). Producing good code from the existing templates requires additional engineering time.
                +
                +### OpenAPI code generator constraints for Akka server
                +#### @Pat
                +- oneOf is not supported. It combines everything from the list if it's an object, and discards it if it's a primitive
                +- allOf is not supported as stated in the documentation, but testing locally it worked
                +- Have to handwrite the serialization layer
                +#### @Shota
                +- Undefined type ```AnyType```. You can have additionalProperties (```components/schemas//properties/additionalProperties```) in the schema. When you add it, it will generate a type for \ that has another type called `AnyType` inside. This type is not defined (it just does not exist in the generated code), so the compilation will fail. If you get a compilation error in your sources with some `AnyType` that is not defined, look for additionalProperties in your schema
                +- Values of type object without properties don't serialize with spray-json. You can have ```components/schemas//properties/```, and every property has a type, like string, int, etc. You can have the type as object, but if you do so, you must provide the object properties as well, like in the example below. If you don't, it will generate this object type with Any in Scala, and then the Akka marshaller will fail, because we use SprayJson there, and it does not support a Reader and Writer for type Any (basically, it can't serialize type Any into JSON). You could probably define a Writer and Reader for type Any to be an empty object, but I personally don't see a reason to have a value of type object and not define what properties it is going to have anyway.
                +- ```requestBody``` in every path must be explicitly ```required:true```. It is ```false``` by default. If not marked as ```true```, it will generate service functions that accept ```Option[Type]``` instead of ```Type```, but the endpoints always expect ```Type```, not ```Option[Type]```, even if required is ```false```. Then, when you try to generate the sources, you will get the compilation error ```expecting Type got Option[Type]```
                
                +
                +## Decision Drivers
                +
                +- enforce type-safety of endpoint definitions using the Scala compiler and the Tapir DSL, and add CI for endpoint definitions
                +- make endpoint definitions convenient for engineers by reusing common abstractions and definitions
                +- introduce standard types, schemas, and approaches for all endpoint definitions: query, pagination, responses, etc.
                +- reuse endpoint definitions for creating server and client stubs in Scala
                +- align the server side of the REST API with the current technology stack (ZIO + ecosystem)
                +- have control over the codebase and data types
                +- reduce the maintenance time of the code (currently, either the OAS must be adapted for the generator or the mustache templates must be fixed)
                +- implement non-functional requirements (metrics, tracing, logging) in a functional way
                +- straightforward generation of Swagger UI, Redoc documentation, and AsyncAPI documentation based on the endpoint definitions
                +
                +## Considered Options
                +
                +- use OpenAPI tools (edit the OAS manually, generate a server stub for Akka and client stubs for any other language)
                +- use OpenAPI tools, but generate code for another server-side library (Play, Finch, Lagom)
                +- use the Tapir library (edit endpoint definitions as Scala code, reuse endpoint definitions for server stubs, generate the OAS based on endpoint definitions, generate client stubs for any other language)
                +
                +## Decision Outcome
                +
                +Chosen option: "use Tapir library" until the end of the year; evaluate this solution in 2023.
                +
                +All endpoint definitions are written in the Tapir DSL.
                +
                +The OpenAPI specification is generated from the endpoint definitions and published as an artefact (must be a part of CI).
                +
                +The server side is interpreted using a ZIO-HTTP interpreter to be aligned with the given technology stack.
                +
                +Client-side stubs are generated using OpenAPI tools and the OpenAPI specification file (must be a part of CI).
                +
                +For server-side code, the flow is the following:
                +
                
+graph TD
+    ED(Endpoint Definition) --> |Generate| OAS(OpenAPI Specification)
+    ED --> |Generate| AAUI(AsyncAPI Specification)
+    ED --> |Interpret| SSS(Scala Server Stub)
+    ED --> |Interpret| SCS(Scala Client Stub)
+    ED --> |Produce| SUI(Swagger UI)
+    ED --> |Produce| RUI(Redoc UI)
+    OAS --> |Input| OAT(OpenAPI Tools)
+    OAT --> |Generate| SS(Server Stub)
+    OAT --> |Generate| CS(Client Stub)
+
                +
                +
                +### Positive Consequences
                +
                +- Type-safety and OAS configuration as code will speed up development
                +- The generated OpenAPI specification is unified according to a single standard (the Tapir generator)
                +- Errors in the types and endpoint definitions will be found at compile time
                +- Code generation will be replaced with interpretation, with higher guarantees of stability
                +- Engineers will save time for feature implementation instead of investigating issues with OAS files or templates
                +- Better management of the OAS spec and control over the documentation (Swagger UI, Redoc, AsyncAPI for WebSockets)
                +
                +### Negative Consequences
                +- Not all engineers will be able to edit the endpoint definitions in the Tapir DSL, so either only engineers with Scala knowledge will do this, or knowledge sharing and "How to use Tapir" workshops are required.
                +- The OAS is going to be generated from the model defined in the DSL, so granular/manual control over the spec will be replaced by the Tapir generator
                +- There is a risk that Tapir might have some hidden surprises and constraints
                +
                +### Option 1 & 2: Feature Implementation Workflow
                +
                
+graph TD
+    U[Start Feature] --> |Edit OAS| A
+    A[OAS File] --> |Input| E
+    U --> |Edit Template| E
+    E[Generator & Templates]-->|Generate Server Code| B(Server Code)
+    E -->|Generate Client Code| C(Client Code)
+    C -->|Compile| OC(Other Compiler)
+    OC -->|Compilation Error| I
+    OC -->|Success| T
+    E -->|Host file as Swagger UI| D(Swagger)
+    B --> |Compile| S(Scala Compiler)
+    S --> |Compilation Error| I(Investigate)
+    I --> |Try again| U
+    S --> |Success| T(Complete Feature)
+
+ +### Option 3: Feature Implementation Workflow +
+graph TD
+    U[Start Feature] --> |Edit Endpoint Specification| ED(Endpoint Definition)
+    U --> |Edit Input/Output Types| DM(Domain Model)
+    ED --> |Input| TE(Tapir Library)
+    DM --> |Input| TE
+    TE --> |Generate| A
+    TE --> |Interpret| SC(Server Code)
+    TE --> |Interpret| CC(Client Code)
+    TE --> |Produce| SW(Swagger UI)
+    TE --> |Produce| RD(Redoc UI)
+    TE --> |Compilation Error| U
+    A[OAS File] --> |Input| E
+    U --> |Edit Template| E
+    E[Generator & Templates]-->|Generate Server Code| B(Server Code)
+    E -->|Generate Client Code| C(Client Code)
+    C -->|Compile| OC(Other Compiler)
+    OC -->|Compilation Error| I
+    OC -->|Success| T
+    E -->|Host file as Swagger UI| D(Swagger)
+    B --> |Compile| S(Scala Compiler)
+    S --> |Compilation Error| I(Investigate)
+    I --> |Try again| U
+    S --> |Success| T(Complete Feature)
+
                +
                +
                +## Pros and Cons of the Options
                +
                +### Option 1: use OpenAPI tools and mustache templates for Akka server
                +
                +- Good, because @Pat and @Shota already stabilized the templates, and we have a working solution
                +- Good, because any engineer from the CoreDID and Product Foundry teams is able to contribute to the documentation
                +- Good, because the same source of truth (the OAS file) is used to generate the server and client stubs (fewer integration problems for client stubs)
                +- Bad, because there are known constraints in the mustache templates that can slow down engineering
                +- Bad, because Akka changed the licence and version 2.6.x will not be supported in 1 year.
                +- Bad, because it's hard to keep the same standard for OAS files that are written by different engineers
                +- Bad, because all OAS files are merged together at the infrastructure level, which is a slightly complex solution for this task.
                +- Bad, because the Akka framework is not in the ZIO ecosystem (it's not a good practice to use both frameworks)
                +
                +### Option 2: use OpenAPI tools and mustache templates for alternative Scala server libraries (Finch, Lagom, Play)
                +
                +- All ```good``` and ```bad``` are the same as in Option 1
                +- Bad, because we don't know if the mustache templates are good enough for Scala 3
                +- Bad, because we need to evaluate if the engineering team has experience in Finch, Lagom, or Play
                +
                +### Option 3: use Tapir as a DSL for OpenAPI specification
                +
                +- Good, because type-safety and the DSL will save engineering time by providing a quick feedback loop at compile time
                +- Good, because the generated OAS will be aligned with common standards
                +- Good, because engineers can define and reuse abstractions in an FP way
                +- Good, because the entities (inputs/outputs) will be reused by the Scala backend library
                +- Good, because the endpoint definitions will be reused in the Scala server and client stubs
                +- Good, because there is no need to generate the code; the stubs are interpreted by the library
                +- Good, because ZIO-HTTP will be used, which is aligned with the current stack
                +- Good, because OpenAPI, Swagger, Redoc, and AsyncAPI document/site generation are supported
                +- Bad, because only Scala engineers will be able to edit the documentation
                +- Bad, because granular control over the OAS YAML file will be lost (the OAS file is generated automatically)
                +- Bad, because we need to spend 3-5 days to transform the OAS files into the Tapir DSL
                +
                +## How to migrate from the current state to Tapir?
                +### Current state: OpenAPI Tools + mustache templates for Akka server
                +### Desired state: Endpoint Definitions in Tapir + ZIO-HTTP
                +The estimated migration time is 4-6 days, which we don't really want to waste.
                +
                +So, the engineering team can proceed by keeping the existing endpoints in the current state and even work on new endpoints using the generated server stubs for Akka.
                +
                +At the same time, the OAS files can be translated to Tapir step by step, and the endpoint definitions can be [interpreted by the Tapir library as Akka routes](https://tapir.softwaremill.com/en/latest/server/akkahttp.html) and attached to the server in the same way as the generated endpoints.
                +
                +This transition period might take 2-3 weeks, until the engineering team gets enough knowledge of using Tapir.
                +
                +Once all the endpoints are translated to Tapir, it will be possible to switch the interpreter from Akka to the [ZIO-HTTP library](https://tapir.softwaremill.com/en/latest/server/ziohttp.html).
                
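                +To make the target state concrete, here is a minimal sketch of what an endpoint definition and its OpenAPI generation could look like in Tapir. The endpoint, names, title, and imports are illustrative assumptions (the module layout varies between Tapir versions), not the actual Identus API:
                +
                +```
                +import sttp.tapir.*
                +import sttp.tapir.docs.openapi.OpenAPIDocsInterpreter
                +import sttp.apispec.openapi.circe.yaml.*
                +
                +// Hypothetical endpoint: GET /credentialschemas/{id}, returning the schema as JSON text
                +val getSchema: PublicEndpoint[String, Unit, String, Any] =
                +  endpoint.get
                +    .in("credentialschemas" / path[String]("id"))
                +    .out(stringJsonBody)
                +
                +// The same value is the single source of truth: it can be interpreted as a ZIO-HTTP
                +// route or an sttp client, and rendered as an OpenAPI YAML document
                +val openApiYaml: String =
                +  OpenAPIDocsInterpreter()
                +    .toOpenAPI(getSchema, "Schema Registry", "0.1.0")
                +    .toYaml
                +```
                +
                +A type error in such a definition fails the build, which is exactly the compile-time feedback loop listed in the decision drivers.
                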
                +
                +## Links
                +
                +- [OpenAPI Tools](https://github.com/OpenAPITools/openapi-generator)
                +- [Goals of Tapir library](https://tapir.softwaremill.com/en/latest/goals.html)
                +- [Tapir](https://tapir.softwaremill.com/en/latest/index.html)
                +
                +
                diff --git a/documentation/adrs/decisions/20221006-store-private-keys-of-issuers-inside-prism-agent.md b/documentation/adrs/decisions/20221006-store-private-keys-of-issuers-inside-prism-agent.md
                new file mode 100644
                index 000000000..1adad8801
                --- /dev/null
                +++ b/documentation/adrs/decisions/20221006-store-private-keys-of-issuers-inside-prism-agent.md
                @@ -0,0 +1,30 @@
                +# Store private keys of Issuers inside the Cloud Agent
                +
                +- Status: accepted
                +- Deciders: Benjamin Voiturier, Pat Losoponkul, Miloš Džepina, Shailesh Patil, Shota Jolbordi, Bart Suichies, Ezequiel Postan, Yurii Shynbuiev, David Poltorak
                +- Date: 2022-10-05
                +
                +## Context and Problem Statement
                +
                +While each holder has a wallet application on the phone (edge agent) to store private keys, contacts, and credentials, the Identus Cloud Agent will provide a custodial solution to Issuers and Verifiers. Thus, they won't have their own wallets or store/manage keys themselves. There needs to be storage for the private keys of Issuers and Verifiers on the Cloud Agent side.
                +
                +
                +## Considered Options
                +
                +- Having issuers store and manage their own keys on the edge wallet (Prism 1.4 approach)
                +- Storing keys in a dedicated wallet application that is connected to the Cloud Agent
                +- Having the Cloud Agent store and manage keys directly
                +
                +
                +## Decision Outcome
                +
                +Chosen option: Option 3, because it is the simplest approach that satisfies the needs of providing the Issuer and Verifier with key storage while also not requiring them to manage their own keys. Option 3 was chosen instead of Option 2 because it achieves the same goal but does not require work on integrating another wallet application, so in short, it is simpler and faster to implement.
                +
                +### Negative Consequences
                +
                +While Option 3 is simpler to implement than Option 2 and provides the basic functionality required to solve the problem emphasized in [Context and Problem Statement](#context-and-problem-statement), it does not provide the full functionality and security of a widely used and well-tested wallet application. Therefore, this decision is considered to be temporary and made only in the interest of solving the problem as fast as possible.
                +
                +
                +## Links
                +
                +- [Recording of the meeting where the decision was made](https://drive.google.com/file/d/120YyW2IEpl-F-6kF0V0Fau4bM7BbQ6mT/view?usp=sharing)
                diff --git a/documentation/adrs/decisions/20230118-quill-library-for-sql-statement-generation.md b/documentation/adrs/decisions/20230118-quill-library-for-sql-statement-generation.md
                new file mode 100644
                index 000000000..301e5dfda
                --- /dev/null
                +++ b/documentation/adrs/decisions/20230118-quill-library-for-sql-statement-generation.md
                @@ -0,0 +1,179 @@
                +# Quill library for SQL statement generation and validation
                +
                +- Status: accepted
                +- Deciders: Yurii Shynbuiev, Fabio Pinheiro, Benjamin Voiturier
                +- Date: 2023-01-17
                +- Tags: DAL, SQL, PostgreSQL, Typesafe
                +
                +## Context and Problem Statement
                +
                +PostgreSQL is essential to the Identus platform technology stack, where most entities are stored.
                +
                +The backend services Identus Cloud Agent, Identus Mediator, and PRISM Node use PostgreSQL.
                +
                +The [Doobie](https://tpolecat.github.io/doobie/index.html) library is currently used in Scala code to communicate with PostgreSQL.
                
                +Quotes from the website:
                +
                +```
                +Doobie is a pure functional JDBC layer for Scala and Cats. It is not an ORM, nor is it a relational algebra;
                +it simply provides a functional way to construct programs (and higher-level libraries) that use JDBC
                +doobie is a Typelevel project.
                +This means we embrace pure, typeful, functional programming, and provide a safe and friendly environment for teaching, learning, and contributing as described in the Scala Code of Conduct.
                +```
                +Doobie is a good choice for the DAL, and this ADR is not about replacing it.
                +
                +Writing the SQL statement and mapping the row to the case class is a boilerplate and error-prone activity that the Quill library can optimize.
                +
                +**Writing the code for mapping a table row to a case class and writing the low-level SQL statement is an error-prone and boilerplate task.**
                +
                +**Using the [Quill](https://getquill.io/) library on top of Doobie can optimize and improve these things.**
                +Quote from the website:
                +
                +```
                +Quill provides a Quoted Domain Specific Language (QDSL) to express queries in Scala and execute them in a target language. The library's core is designed to support multiple target languages, currently featuring specializations for Structured Query Language (SQL) and Cassandra Query Language (CQL).
                +
                +1. Boilerplate-free mapping: The database schema is mapped using simple case classes.
                +2. Quoted DSL: Queries are defined inside a quote block. Quill parses each quoted block of code (quotation) at compile time and translates them to an internal Abstract Syntax Tree (AST)
                +3. Compile-time query generation: The ctx.run call reads the quotation's AST and translates it to the target language at compile time, emitting the query string as a compilation message. As the query string is known at compile time, the runtime overhead is very low and similar to using the database driver directly.
                +4. Compile-time query validation: If configured, the query is verified against the database at compile time, and the compilation fails if it is not valid. The query validation does not alter the database state.
                +```
                +
                +There are also the [Slick](https://scala-slick.org/) and [ScalikeJDBC](http://scalikejdbc.org/) libraries.
                +
                +A comparison of these libraries is not a goal of this ADR, but it's essential to know the differences.
                +
                +There are good references to take a look at in the [Links](#links) section.
                
                +
                +Overall, the libraries differ in the following aspects:
                +
                +- Metamodel (how to define the schema and type mapping)
                +- Static SQL statements (how and where the SQL statement is written/generated)
                +- Dynamic SQL statements (how and where the dynamic SQL statement is written/generated)
                +- Connection Management (thread and connection pooling)
                +- Asynchronous API (the high-level API to execute queries, blocking or non-blocking)
                +- Asynchronous IO (whether the IO operation is blocking or asynchronous)
                +- Effect library that is used (free monad, Future, Task, ZIO)
                +
                +## Decision Drivers
                +
                +- Generate and validate SQL statements based on the convention-over-configuration approach at compile time (type-safe queries)
                +- Reduce boilerplate and error-prone code
                +- Easy writing of dynamic queries
                +
                +## Considered Options
                +
                +- Doobie (Doobie for the connection pooling, SQL statement execution, and SQL statement writing)
                +- Doobie + Quill (Doobie for the connection pooling, SQL statement execution, and SQL statement writing + Quill for the SQL statement generation)
                +- Quill (Quill for the connection pooling, SQL statement execution, and SQL statement writing and generation)
                +
                +## Decision Outcome
                +
                +Chosen option: "Doobie + Quill", because it's the simplest solution that requires minimal changes to the existing code and brings the benefits of automatic SQL statement generation and validation at compile time (see below).
                +
                +### Positive Consequences
                +
                +- convention-over-configuration approach for the generation and validation of SQL statements using macros at compile time
                +- easy work with dynamic queries
                +- backward-compatible solution (minimal changes are required to the current code base)
                +
                +### Negative Consequences
                +
                +- DTO case classes are required for each table to generate the SQL statement based on the convention
                +
                +## Pros and Cons of the Options
                +
                +### Doobie
                +
                +The Doobie library is used as it is right now, without any changes
                +
                +- Good, because it is a solid FP library for PostgreSQL
                +- Good, because it has good documentation and a large community of developers who contribute to the library
                +- Good, because it is built using Free Monad, which makes it composable and easy to integrate with any popular effects library
                +- Bad, because it has a low-level API for writing the SQL statement (boilerplate and error-prone code)
                +- Bad, because it uses blocking IO at the network level
                +
                +### Doobie+Quill
                +
                +The Doobie library is used as it is right now, and the Quill library is used for SQL statement generation and validation at compile time
                +
                +- Good, because it is a solid FP library for PostgreSQL
                +- Good, because it has good documentation and a large community of developers who contribute to the library
                +- Good, because it is built using Free Monad, which makes it composable and easy to integrate with any popular effects library
                +- Good, because the Quill library is used for SQL statement generation at compile time
                +- Good, because the Quill library extends the current solution, and no changes to the code base are required
                +- Bad, because a DTO case class must be created for each table
                +- Bad, because it uses blocking IO at the network level
                +
                +### Quill
                +
                +Quill is used instead of Doobie
                +
                +- Good, because it is a solid FP library for PostgreSQL
                +- Good, because it has good documentation and a large community of developers who contribute to the library
                +- Good, because it is built using Free Monad, which makes it composable and easy to integrate with any widespread effects library
                +- Good, because it is used for SQL statement generation at compile time instead of using the Doobie low-level API
                
                +- Good, because it can be configured to use non-blocking IO at the network level
                +- Good, because it gets rid of the `cats` ecosystem that comes with `doobie` (simplifying dependency management)
                +- Bad, because significant refactoring of the whole DAL is required
                +- Bad, because a DTO case class must be created for each table
                +
                +## Examples
                +
                +### Doobie
                +
                +```
                +import cats.effect.IO
                +import doobie._
                +import doobie.implicits._
                +import doobie.postgres._
                +
                +case class Person(id: Int, name: String)
                +
                +val xa = Transactor.fromDriverManager[IO](
                +  "org.postgresql.Driver", "jdbc:postgresql:world", "username", "password"
                +)
                +
                +val q = sql"SELECT id, name FROM person WHERE id = 1".query[Person]
                +
                +// transact runs the ConnectionIO against the transactor, yielding an IO
                +val result: IO[List[Person]] = q.to[List].transact(xa)
                +```
                +
                +### Quill
                +
                +```
                +import io.getquill._
                +
                +// A JDBC context executes queries directly (a mirror context would only echo them back)
                +val ctx = new PostgresJdbcContext(SnakeCase, "ctx")
                +import ctx._
                +
                +case class Person(id: Int, name: String)
                +
                +val q = quote {
                +  query[Person].filter(p => p.id == 1)
                +}
                +
                +val result: List[Person] = ctx.run(q)
                +```
                +
                +### Slick
                +
                +```
                +import scala.concurrent.Future
                +import slick.jdbc.PostgresProfile.api._
                +
                +val db = Database.forConfig("database")
                +
                +case class Person(id: Int, name: String)
                +
                +// Slick requires a table definition that maps columns to the case class
                +class PersonTable(tag: Tag) extends Table[Person](tag, "person") {
                +  def id = column[Int]("id", O.PrimaryKey)
                +  def name = column[String]("name")
                +  def * = (id, name).mapTo[Person]
                +}
                +
                +val q = TableQuery[PersonTable].filter(_.id === 1)
                +
                +val result: Future[Seq[Person]] = db.run(q.result)
                +```
                +
                +#### Two more real examples of Doobie and Quill usage are in the [Links](#links) section
                +
                +## Links
                +
                +- [Comparing Scala relational database access libraries](https://softwaremill.com/comparing-scala-relational-database-access-libraries/)
                +- [Comparison with Alternatives](https://scala-slick.org/docs/compare-alternatives)
                +- [Doobie vs Quill](https://www.libhunt.com/compare-doobie-vs-zio-quill)
                +- [Slick vs Doobie](https://www.libhunt.com/compare-slick--slick-vs-doobie?ref=compare)
                +- [Database access libraries in Scala](https://medium.com/@takezoe/database-access-libraries-in-scala-7aa7590aa3db)
                +- [Typechecking SQL queries with doobie](https://godatadriven.com/blog/typechecking-sql-queries-with-doobie/)
                +- [Typechecking SQL in Slick and doobie](https://underscore.io/blog/posts/2015/05/28/typechecking-sql.html)
                +- [Doobie example in the Pollux library](https://github.com/hyperledger/identus-cloud-agent/blob/pollux-v0.17.0/pollux/lib/sql-doobie/src/main/scala/io/iohk/atala/pollux/sql/repository/JdbcCredentialRepository.scala)
                +- [Quill example in the Pollux library](https://github.com/hyperledger/identus-cloud-agent/blob/pollux-v0.17.0/pollux/lib/sql-doobie/src/main/scala/io/iohk/atala/pollux/sql/model/VerifiableCredentialSchema.scala)
                diff --git a/documentation/adrs/decisions/20230206-use-flyway-to-manage-migrations-for-application-services.md b/documentation/adrs/decisions/20230206-use-flyway-to-manage-migrations-for-application-services.md
                new file mode 100644
                index 000000000..84e78458b
                --- /dev/null
                +++ b/documentation/adrs/decisions/20230206-use-flyway-to-manage-migrations-for-application-services.md
                @@ -0,0 +1,73 @@
                +# Use flyway to manage migrations for application services
                +
                +- Status: [ accepted | deprecated | superseded by [xxx](yyyymmdd-xxx.md)]
                +- Deciders: [list everyone involved in the decision]
                +- Date: [YYYY-MM-DD when the decision was last updated]
                +- Tags: [space and/or comma separated list of tags]
                +
                +Technical Story: [description | ticket/issue URL]
                +
                +## Context and Problem Statement
                +
                +[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.]
                
+ +## Decision Drivers + +- [driver 1, e.g., a force, facing concern, …] +- [driver 2, e.g., a force, facing concern, …] +- … + +## Considered Options + +- [option 1] +- [option 2] +- [option 3] +- … + +## Decision Outcome + +Chosen option: "[option 1]", because [justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force force | … | comes out best (see below)]. + +### Positive Consequences + +- [e.g., improvement of quality attribute satisfaction, follow-up decisions required, …] +- … + +### Negative Consequences + +- [e.g., compromising quality attribute, follow-up decisions required, …] +- … + +## Pros and Cons of the Options + +### [option 1] + +[example | description | pointer to more information | …] + +- Good, because [argument a] +- Good, because [argument b] +- Bad, because [argument c] +- … + +### [option 2] + +[example | description | pointer to more information | …] + +- Good, because [argument a] +- Good, because [argument b] +- Bad, because [argument c] +- … + +### [option 3] + +[example | description | pointer to more information | …] + +- Good, because [argument a] +- Good, because [argument b] +- Bad, because [argument c] +- … + +## Links + +- [Link type](link to adr) +- … diff --git a/documentation/adrs/decisions/20230206-use-scala3-instead-of-scala2-to-write-applications.md b/documentation/adrs/decisions/20230206-use-scala3-instead-of-scala2-to-write-applications.md new file mode 100644 index 000000000..ce19fb627 --- /dev/null +++ b/documentation/adrs/decisions/20230206-use-scala3-instead-of-scala2-to-write-applications.md @@ -0,0 +1,73 @@ +# Use Scala3 instead of Scala2 to write applications + +- Status: [ accepted | deprecated | superseded by [xxx](yyyymmdd-xxx.md)] +- Deciders: [list everyone involved in the decision] +- Date: [YYYY-MM-DD when the decision was last updated] +- Tags: [space and/or comma separated list of tags] + +Technical Story: [description | ticket/issue URL] + +## Context and Problem Statement + +[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.] + +## Decision Drivers + +- [driver 1, e.g., a force, facing concern, …] +- [driver 2, e.g., a force, facing concern, …] +- … + +## Considered Options + +- [option 1] +- [option 2] +- [option 3] +- … + +## Decision Outcome + +Chosen option: "[option 1]", because [justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force force | … | comes out best (see below)]. 
                +
                +### Positive Consequences
                +
                +- [e.g., improvement of quality attribute satisfaction, follow-up decisions required, …]
                +- …
                +
                +### Negative Consequences
                +
                +- [e.g., compromising quality attribute, follow-up decisions required, …]
                +- …
                +
                +## Pros and Cons of the Options
                +
                +### [option 1]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +### [option 2]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +### [option 3]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +## Links
                +
                +- [Link type](link to adr)
                +- …
                diff --git a/documentation/adrs/decisions/20230206-use-zio-as-a-functional-effect-system-within-applications-to-manage-conccurency.md b/documentation/adrs/decisions/20230206-use-zio-as-a-functional-effect-system-within-applications-to-manage-conccurency.md
                new file mode 100644
                index 000000000..7e5eee278
                --- /dev/null
                +++ b/documentation/adrs/decisions/20230206-use-zio-as-a-functional-effect-system-within-applications-to-manage-conccurency.md
                @@ -0,0 +1,73 @@
                +# Use ZIO as a functional effect system within applications to manage concurrency
                +
                +- Status: [ accepted | deprecated | superseded by [xxx](yyyymmdd-xxx.md)]
                +- Deciders: [list everyone involved in the decision]
                +- Date: [YYYY-MM-DD when the decision was last updated]
                +- Tags: [space and/or comma separated list of tags]
                +
                +Technical Story: [description | ticket/issue URL]
                +
                +## Context and Problem Statement
                +
                +[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.]
                +
                +## Decision Drivers
                +
                +- [driver 1, e.g., a force, facing concern, …]
                +- [driver 2, e.g., a force, facing concern, …]
                +- …
                +
                +## Considered Options
                +
                +- [option 1]
                +- [option 2]
                +- [option 3]
                +- …
                +
                +## Decision Outcome
                +
                +Chosen option: "[option 1]", because [justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force force | … | comes out best (see below)].
                
                +
                +### Positive Consequences
                +
                +- [e.g., improvement of quality attribute satisfaction, follow-up decisions required, …]
                +- …
                +
                +### Negative Consequences
                +
                +- [e.g., compromising quality attribute, follow-up decisions required, …]
                +- …
                +
                +## Pros and Cons of the Options
                +
                +### [option 1]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +### [option 2]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +### [option 3]
                +
                +[example | description | pointer to more information | …]
                +
                +- Good, because [argument a]
                +- Good, because [argument b]
                +- Bad, because [argument c]
                +- …
                +
                +## Links
                +
                +- [Link type](link to adr)
                +- …
                diff --git a/documentation/adrs/decisions/20230405-did-linked-resources.md b/documentation/adrs/decisions/20230405-did-linked-resources.md
                new file mode 100644
                index 000000000..b6f0256cc
                --- /dev/null
                +++ b/documentation/adrs/decisions/20230405-did-linked-resources.md
                @@ -0,0 +1,733 @@
                +# DID-linked-resources
                +
                +- Status: draft
                +- Deciders: Yurii Shynbuiev, Benjamin Voiturier, Lohan Spies, Ezequiel Postan, Shota Jolbordi
                +- Date: 2023-04-05
                +- Tags: did, linked-data, ledger
                +
                +## Target
                +
                +[Research Spike - Schema and Verifiable Presentation Registry](https://input-output.atlassian.net/browse/ATL-3186)
                +
                +- Provide a clear and concise analysis of the various schema registry implementations and the associated benefits and downfalls of each approach.
                +- Provide a concrete proposal for what we would like to implement for the Identus platform.
                +- Provide a generic way of storing and linking the resources for the DID in the Identus platform.
                +
                +## Context and Problem Statement
                +
                +The Identus platform must be able to store and distribute various resources such as credential schemas, logos, revocation status lists, and documents (i.e., any text, JSON, images, etc.). But in the scope of the current ADR, the following resource types are discussed:
                +
                +- credential schema (JSON and AnonCreds)
                +- credential definition (AnonCreds)
                +- revocation list
                +
                +**NOTE**: Resources containing PII must never be stored on-chain.
                +
                +Requirements for storing and distributing resources:
                +
                +- Decentralization - resources must be stored in decentralized storage
                +- Discoverability - it should be possible to discover the resource
                +- Longevity - the ability of a storage solution to maintain the availability and integrity of stored resources
                +- Interoperability - it should be possible for other SSI systems to fetch the resource
                +- Trust - resources must be stored in reliable tamper-proof storage and be trusted by other SSI systems
                +
                +Other requirements, such as `versioning`, `security`, and `immutability`, are out of the scope of this ADR:
                +
                +- Versioning - is a specific requirement for the particular resource and belongs to the resource metadata
                +- Security - is an important aspect that must be taken into account by the underlying storage system and data access layer
                +- Immutability - is one of the strategies to guarantee `trust` and `decentralisation`, but it shouldn't be a requirement by itself.
                +
                +The technical solution involves many variations and small decisions, but overall it can be split into two main questions:
                +
                +- where is the resource stored?
                +- how is the resource discovered and fetched?
                
                +
                +## Constraints
                +
                +### Storage limitations
                +
                +All decentralized storage (DLT or IPFS) has storage limitations, and the amount of data that can be stored is limited by the available storage capacity and the way the resources are stored.
                +
                +The following aspects must be taken into account for storing the resources in a DLT:
                +
                +- transaction size limit (can be mitigated by data fragmentation, so the single resource is stored in multiple transactions) - 16KB, 32KB, 64KB, up to 1MB - depending on the type of the blockchain
                +- throughput - bytes we can insert into storage per unit of time
                +- latency - multi-second time per insert
                +- cost - each insertion costs fees
                +
                +Based on the nature of the resource, the size limitations must be considered.
                +
                +For the following resource types and common use cases, 16KB should be enough, so it's possible to store these on a DLT:
                +- credential schema
                +- credential definition
                +- logo in SVG format
                +- Merkle Tree
                +- documentation in the markdown format
                +
                +For larger resource types, IPFS or another option should be considered. Examples of large resources:
                +- media files
                +- large documents
                +- large revocation status lists
                +
                +IPFS doesn't have a size limitation (it's limited by the underlying storage or the particular SDK) and requires additional infrastructure and `incentives` (the way to pay for the storage) from the community.
                +
                +IPFS can be used for storing the resources, but it should be covered in the scope of a separate ADR.
                +
                +### Scalability
                +
                +While DLT and IPFS are designed for scalability, they can still face issues with scalability when it comes to storing SSI resources. As more users store their SSI resources on these platforms, it can become more difficult to scale the infrastructure to handle the increased demand.
                +
                +To mitigate the scalability challenge, a `hybrid` solution can be considered. In this case, the resource is stored in a centralized database or IPFS system, and the `hash`, `signature`, or other metadata that guarantees the `trust` is stored on-chain.
                +
                +Storing credential schemas and logos, as opposed to large documents, doesn't require the hybrid solution, so its use cases are out of scope in the current ADR.
                +
                +Scalability issues must also be considered in the decision on linking the resources to the DID. For instance, the Cheqd solution keeps all the resources linked to the DID inside the metadata of the DIDDoc, which leads to the DIDDoc growing in size over time and to an update of the DID Document each time a new resource is published on-chain and linked to the DID.
                +
                +### Access control
                +
                +SSI resources stored in DLT and IPFS can be accessed by anyone who has access to the network.
                +
                +This can be a security concern for organizations that need to control access to their SSI resources.
                +
                +Access control can also be an issue for interoperability with other SSI systems.
                +
                +The types of resources such as credential schemas, logos, and revocation lists should be available without additional access control.
                +
                +### Data privacy
                +
                +While DLT and IPFS are designed to be secure, there is still a risk that SSI resources stored on these platforms could be accessed or stolen by unauthorized parties.
                +
                +This is especially concerning when it comes to sensitive personal information.
                +
                +Personal data or any other sensitive information should not be stored on-chain or be available to unauthorized parties.
                +
                +Credential schemas, documents, and logos usually do not contain personal data, so they can be stored on-chain.
                
                +
                +Revocation lists that are designed using privacy-preserving capabilities can be stored on-chain as well.
                +
                +## Decision Drivers
                +
                +- Interoperability
                +- Trust
                +- Longevity
                +- Scalability
                +- Discoverability
                +- Vendor Lock
                +
                +## Storage
                +
                +Choosing the right storage for resources is an architectural decision.
                +
                +Companies that build SSI platforms usually use the underlying blockchain for storing the resources in a generic way, plus an API layer with an underlying centralized database and SDK for indexing and access to the resources.
                +
                +Usually, resources are stored efficiently (any binary format, protobuf, CBOR) on-chain to reduce the size and the cost of the operation.
                +
                +The application layer that communicates with the underlying blockchain is used for publishing and retrieval of the resource.
                +Based on the concrete implementation, the resources can be decoded and indexed in the database and made available via an internal API, SDK, or the Universal Resolver.
                +
                +Storing resources off-chain also makes sense, but in this case, the `longevity` of the storage and API layer is limited by the lifetime of the organization that published the resource. For this solution, `trust` can be achieved by signing the resource using the key of the DID stored on-chain. This solution is not fully centralized, as the organizations have their own infrastructure with the database.
                +
                +## Linking the resource to the DID
                +
                +Linking a resource to a DID means associating a specific resource with a DID and resolving the resource via the Universal Resolver, an application API, or an SDK, or finding it on-chain.
                +
                +In all the considered solutions, this is achieved using a DID document and the algorithm for discovery and resource dereferencing.
                +
                +## Considered Options
                +
                +### DID document linkedResources field
                +
                +The particular resource must be available via a URL, and the metadata of the resource is described in the `linkedResources` array.
                +
                +Example:
                +
                +```
                +{
                +  "@context": "https://w3id.org/did/v1",
                +  "id": "did:example:123456789abcdefghi",
                +  "publicKey": [{
                +    "id": "did:example:123456789abcdefghi#keys-1",
                +    "type": "Ed25519VerificationKey2018",
                +    "controller": "did:example:123456789abcdefghi",
                +    "publicKeyBase58": "7dNN1A8H4DwPU1h4btvohGadnbx8sHF2U6XJU6vLBBfA"
                +  }],
                +  "linkedResources": [{
                +    "url": "https://example.com/credentialschema/123",
                +    "type": "CredentialSchema",
                +    "name": "DrivingLicense"
                +  }]
                +}
                +```
                +
                +#### Positive Consequences
                +
                +- the solution describes a simple way of linking the resources to the DID Document; however, this approach looks outdated and is not part of the did-core specification
                +
                +#### Negative Consequences
                +
                +The drawbacks of the solution:
                +
                +- interoperability: it is not a part of the DID-core specification anymore, even if it is possible to find information about it
                +- interoperability: the resource should be fetched by the application or SDK
                +- trust: must be guaranteed by the underlying DLT; the DID document should have an anchor: a hash, signature, or Tx id that references the DLT
                +- discoverability: information about the resource's metadata (author, version, content type) is absent
                +- scalability: the DID document must be updated when a new resource is added, so the solution sacrifices `scalability` as the content of the DID document will grow
                +
                +#### Out of the Scope
                +
                +- longevity: should be guaranteed by the underlying DLT
                +
                +### DID document didDocumentMetadata -> linkedResourceMetadata (Cheqd ADR)
                +
                +Each resource entry is a part of a collection and is described in the `linkedResourceMetadata` field.
                
                +
                +The solution is described in the Cheqd ADR referenced in the [Links](#links) section of the current ADR.
                +
                +Example:
                +
                +```
                +{
                +  "didDocumentMetadata": {
                +    "linkedResourceMetadata": [
                +      {
                +        "resourceURI": "did:cheqd:mainnet:1f8e08a2-eeb6-40c3-9e01-33e4a0d1479d/resources/f3d39687-69f5-4046-a960-3aae86a0d3ca",
                +        "resourceCollectionId": "1f8e08a2-eeb6-40c3-9e01-33e4a0d1479d",
                +        "resourceId": "f3d39687-69f5-4046-a960-3aae86a0d3ca",
                +        "resourceName": "PassportSchema", // First version of a Resource called PassportSchema
                +        "resourceType": "CL-Schema",
                +        "mediaType": "application/json",
                +        "created": "2022-07-19T08:40:00Z",
                +        "checksum": "7b2022636f6e74656e74223a202274657374206461746122207d0ae3b0c44298",
                +        "previousVersionId": null, // null if no previous version, otherwise, resourceId of previous version
                +        "nextVersionId": null, // null if no new version, otherwise, resourceId of new version
                +      }
                +    ]
                +  }
                +}
                +```
                +
                +The solution is not fully interoperable with the SSI ecosystem, but it's probably the first successful specification that formalizes DID-linked resources and the DID URL.
                +
                +Cheqd's approach for linking the resources to the DID is not a part of the current version of the DID specification, even though it's possible to find some information about the optional `linkedResources` and `linkedResourceMetadata` DIDDoc fields in search engine caches or ChatGPT.
                +
                +It looks like the ToIP specification is inspired by Cheqd's ADR.
                +
                +#### Positive Consequences
                +
                +- versioning of the resource, as the metadata contains references to the previous and next versions
                +- the collection definition is formalized and published on-chain and in the DID document, so all the resources are categorized
                +- discoverability: the URI is replaced with a DID URL that allows discovering the resource using an internal and/or the Universal Resolver
                +- trust: the `checksum` is provided, so it is possible to verify that the resource was not modified by a 3rd party
                +
                +
                +#### Negative Consequences
                +
                +- scalability: the DID document should be updated when a new resource is created
                +- interoperability: using the Universal Resolver is optional, so either an SDK or an internal application API must be used to fetch the resource
                +- standard: the `linkedResourceMetadata` field is not a standard part of the DID specification, so the application should be aware of how to deal with it
                +
                +
                +### DID URL dereferencing (W3C specification)
                +
                +The current solution is based on the dereferencing algorithm described in the [DID-Resolution#dereferencing](https://w3c-ccg.github.io/did-resolution/#dereferencing) specification and describes how the DID resolver can dereference the resource linked to the DID. It does not describe where the resource is stored.
                +
                +The main idea is an algorithm that uses the DID URL and the information about the services in the DID Document to let the DID resolver compose the final resource URL and return the requested resource.
                +
                +Dereferencing is performed by defining the service `id` and `relativeRef` params, or a `path`, in the DID URL.
                +
                +**NOTE:**
                +The `service.type` property is not taken into account in this flow.
                +According to the did-core specification, the service type and its associated properties SHOULD be registered in the [DID Specification Registries](
                +https://www.w3.org/TR/did-spec-registries/#service-types).
                +So, defining and registering the `schemaService` or `resourceService` should be the next step to facilitate the interoperability of SSI systems.
                
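                +
                +As a sketch, the dereferencing step boils down to matching a service entry by `id`, URL-decoding the `relativeRef`, and joining it with the `serviceEndpoint`. The Scala helper below is hypothetical (it is not part of the specification or the Identus codebase) and only illustrates the algorithm; Example 1 then shows the same flow with concrete values:
                +
                +```
                +import java.net.URLDecoder
                +
                +// Hypothetical model of a DID Document service entry
                +final case class Service(id: String, `type`: String, serviceEndpoint: String)
                +
                +// Illustrative sketch of DID URL dereferencing for ?service=...&relativeRef=...
                +// (no error handling; a single matching service entry is assumed; a conformant
                +// resolver would first resolve the DID Document to obtain the services)
                +def dereference(didUrl: String, services: List[Service]): Option[String] = {
                +  val parts = didUrl.split("\\?", 2)
                +  val query = if (parts.length > 1) parts(1) else ""
                +  val params = query.split("&").filter(_.contains("="))
                +    .map(_.split("=", 2)).map(kv => kv(0) -> kv(1)).toMap
                +  for {
                +    serviceId   <- params.get("service")
                +    relativeRef <- params.get("relativeRef").map(URLDecoder.decode(_, "UTF-8"))
                +    service     <- services.find(_.id.endsWith("#" + serviceId))
                +  } yield service.serviceEndpoint.stripSuffix("/") + relativeRef
                +}
                +```
                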
+
+Example 1: using `service` and `relativeRef`
+
+The credential schema resource can be defined as a DID URL
+
+```
+did:prism:abcdefg?service=credentialschema&relativeRef=%2Fcredentialschemas%2F123e4567-e89b-12d3-a456-426614174000
+```
+
+and the DID Document must have the service defined
+
+```
+{
+  "service": [
+    {
+      "id": "did:prism:abcdefg#credentialschema",
+      "type": "CredentialSchema",
+      "serviceEndpoint": "https://agent.example.com/schema-registry"
+    }
+  ]
+}
+```
+
+so the Universal Resolver, using the concrete DID resolver, must dereference the resource as
+
+```
+https://agent.example.com/schema-registry/credentialschemas/123e4567-e89b-12d3-a456-426614174000
+```
+
+and should return the instance of the credential schema.
+
+Example 2 is another variation, using `service` and a `path` in the DID URL
+
+```
+did:prism:abcdefg/credentialschemas/123e4567-e89b-12d3-a456-426614174000?service=credentialschema
+```
+
+In this case, the DID method may describe how the path should be resolved and how the resource must be fetched.
+
+#### Positive Consequences
+
+- interoperability: the resource is resolved by a conformant DID resolver according to the specification
+- discoverability: the resource defined in the DID URL is resolved and fetched dynamically
+- scalability: the DID document is not updated for each new resource
+
+#### Negative Consequences
+
+- specification: for the particular cases when the `path` is used in the DID URL, the resolution behavior must be described in the DID method
+- scalability: the algorithm contains 2 or 3 steps, and the DID document must always be resolved in the first step
+
+#### Out of the Scope
+- trust, longevity, and technology stack are not specified in this solution
+
+### DID URL Dereferencing (Trust over IP specification - outdated)
+
+The [ToIP specification](https://wiki.trustoverip.org/display/HOME/DID+URL+Resource+Parameter+Specification) is an analog of the W3C dereferencing specification and describes the convention for dereferencing resources from the DID URL.
+
+The main idea is the same: use the DID URL and a combination of convention and the DID method to resolve the digital resource associated with the DID.
+
+But instead of relying on the `service` and `relativeRef` parameters, the ToIP spec focuses on the `resource` parameter: if the DID URL contains the `resource` parameter, the resolver must return the resource.
+
+Example:
+
+```
+did:example:21tDAKCERh95uGgKbJNHYp/some/path?resource=true
+
+did:example:21tDAKCERh95uGgKbJNHYp/some/longer/path?resource=json
+
+did:example:21tDAKCERh95uGgKbJNHYp/uuid:33ad7beb-1abc-4a26-b892-466df4379a51/?resource=ld+json
+
+did:example:21tDAKCERh95uGgKbJNHYp/resources/uuid:33ad7beb-1abc-4a26-b892-466df4379a51/?resource=cbor
+```
+
+The main disadvantage of this approach is that the logic for resolving and fetching the resource associated with the given DID URL relies entirely on the DID method specification (in the W3C variation it is just a convention plus the algorithm for resource resolution).
+
+The ToIP specification doesn't describe the details of how the underlying resource is stored. It might be a DLT (blockchain or IPFS), classic cloud storage, or on-premise storage.
+
+### DID URL Dereferencing (Trust over IP specification - latest)
+
+The new specification for DID URL dereferencing is an improved version that adopts Cheqd's idea of publishing the resource metadata in the DID document.
+
+The main difference from the previous specification is the introduction of query parameters for discovering the resource (instead of the `resource` parameter only) and a simplification of Cheqd's approach by skipping the `collection` abstraction.
+
+The DID document refers to the associated resource via linked resource metadata.
+
+Changes to the DID method are also required (described in the Verifiable Data Registry and DID Method Requirements).
+
+The current status of the document is a draft, but it is going to be published in the did-core specification.
+
+The list of resource parameters with descriptions is the following:
+
+- `resourceUri` (required): A string or a map that conforms to the rules of [RFC3986] for URIs which SHOULD directly lead to a location where the resource can be accessed from.
+- `resourceCollectionId` (optional): A string that conforms to a method-specific unique identifier format.
+- `resourceId` (optional): A string that conforms to a method-specific unique identifier format.
+- `resourceName` (required): A string that uniquely names and identifies a resource. This property, along with the `resourceType` below, can be used to track version changes within a resource.
+- `resourceType` (required): A string that identifies the type of resource. This property, along with the `resourceName` above, can be used to track version changes within a resource. Not to be confused with the media type. (TBC to add to DID Spec Registries)
+- `resourceVersionId` (optional): A string that uniquely identifies the version of the resource provided by the resource creator as a tag.
+- `mediaType` (required): A string that identifies the IANA-registered Media Type for a resource.
+- `created` (required): A JSON String serialized as an XML DateTime normalized to UTC 00:00:00 and without sub-second decimal precision.
+- `checksum` (optional): A string that provides a checksum (e.g. SHA256, MD5) for the resource to facilitate data integrity.
+- `previousVersionId` (optional): The value of the property MUST be a string. This is the previous version of a resource with the same resourceName and resourceType. The value must be 'null' if there is no previous version.
+- `nextVersionId` (optional): The value of the property MUST be a string. This is the next version of a resource with the same resourceName and resourceType. The value must be 'null' if there is no next version.
+
+This specification describes many important aspects:
+
+- the list of the query parameters in the DID URL for dereferencing the resource, and the error messages,
+- DID Method and VDR requirements, and
+- DID Resolver requirements
+
+#### Positive Consequences
+
+- interoperability: the resource is resolved in a standard way according to the ToIP specification, following the W3C specification for DID URL dereferencing
+- discoverability: the resource defined in the DID URL is resolved and fetched dynamically
+- scalability: compared to the W3C specification, the DID document is not required to fetch the resource, so instead of 2-3 steps (calls), resource resolution should complete in a single step. The behavior must be described in the DID method and implemented by the DID resolver.
+- trust: publishing the `checksum` of the resource inside the DID document allows other SSI systems to check the resource's validity.
+
+#### Negative Consequences
+
+- scalability: the specification is inspired by the Cheqd approach of storing the linkedResourceMetadata inside the DID document.
The ToIP specification describes this functionality as optional ("Through associating the resource with a DID Document, the DID Document may generate associated metadata about the resource")
+- complexity: this specification is the most complex way to fetch a resource, so it is not trivial to implement for all DIDs in the SSI ecosystem.
+- specification: the resolution logic for resources must be described in the DID method and implemented in the DID resolver. As a consequence of this approach, the solution must either communicate directly with the DLT or rely on a SaaS layer for fetching the resources.
+
+#### Out of the Scope
+
+- longevity and technology stack are not specified in this solution and must be guaranteed by the underlying DLT
+
+
+### RootsID - Cardano AnonCreds (Implementation of ToIP on the Cardano stack)
+RootsID adopted the AnonCreds specification to store the credential schema and credential definition on the Cardano blockchain.
+
+Links to the implementation and the method description are in the [Links](#links) section of this ADR.
+
+It is a proof-of-concept implementation of the ToIP specification for DID URL dereferencing that resolves the resources linked to the DID, written in TypeScript and Python and using the Blockfrost REST API to reach the Cardano blockchain. The Blockfrost SaaS middle layer is used for publishing and fetching the Tx from the Cardano blockchain.
+
+The solution is limited to storing AnonCreds entities only but can be extended to store general resources.
+
+For more details, please refer to the source code.
+
+As the solution is based on the latest ToIP specification, it inherits all positive and negative consequences of the previous option, but it contains a concrete implementation for the Cardano blockchain that solves the trust and longevity aspects of the technical solution.
+
+#### Positive Consequences
+
+- interoperability: the resource is resolved in a standard way according to the ToIP specification, following the W3C specification for DID URL dereferencing
+- discoverability: the resource metadata is published in the DID document
+- trust & longevity: guaranteed by the underlying Cardano blockchain
+- technology stack: the solution leverages the Blockfrost REST API for communicating with the Cardano blockchain
+- technology stack: the solution is stateless and much cheaper in terms of infrastructure cost
+- technology stack: the solution is implemented in Python and TypeScript (mobile platforms can use the same approach as well)
+
+
+#### Negative Consequences
+- scalability: the specification is inspired by the Cheqd approach of storing the linkedResourceMetadata inside the DID document
+- the convention for references and the logic must be carefully reviewed:
+  - `schemaId` in this solution is `{didRef}/resources/{cardano_transaction_id}`, so it doesn't refer to the `id` but to the Tx where everything else is stored (an interesting idea for a stateless design)
+  - resource metadata is built according to the ToIP specification but for AnonCreds entities only: credential schema and credential definition.
+- technology stack: it doesn't fit the current platform, but it can be used for inspiration.
+
+
+### Hyperledger AnonCreds
+
+According to the AnonCreds specification, resources such as the credential schema and credential definition are stored on-chain. The Hyperledger technology stack uses the Indy blockchain.
+
+The credential schema and definition are not signed by the issuer, but the transaction with the underlying resource is published by the issuer.
So, the integrity of the resource is guaranteed by the fact that it is published inside a transaction signed by the issuer.
+
+Example of the credential schema transaction:
+
+```
+{
+  "txn": {
+    "data": {
+      "data": {
+        "attr_names": [
+          "birthlocation",
+          "facephoto",
+          "expiry_date",
+          "citizenship",
+          "name",
+          "birthdate",
+          "firstname",
+          "uuid"
+        ],
+        "name": "BasicIdentity",
+        "version": "1.0.0"
+      }
+    },
+    "metadata": {
+      "digest": "06bf8a90335563826154700bf80003598932c8ffaffd4f3656fd8ed604bbb639",
+      "endorser": "Ar1YzNwcM74M2Z4XKUWXMW",
+      "from": "Y6LRXGU3ZCpm7yzjVRSaGu",
+      "payloadDigest": "44e0181c9f9d5080434f9bf11801f1b0768a6b985195e14d56e5dab06fde0cb8",
+      "reqId": 1632381635230531300,
+      "taaAcceptance": {
+        "mechanism": "at_submission",
+        "taaDigest": "8cee5d7a573e4893b08ff53a0761a22a1607df3b3fcd7e75b98696c92879641f",
+        "time": 1632355200
+      }
+    },
+    "protocolVersion": 2,
+    "type": "101",
+    "typeName": "SCHEMA"
+  },
+  "txnMetadata": {
+    "seqNo": 73904,
+    "txnId": "Y6LRXGU3ZCpm7yzjVRSaGu:2:BasicIdentity:1.0.0",
+    "txnTime": "2021-09-23T07:20:40.000Z"
+  }
+}
+```
+
+The resource (credential schema) in the current example can be discovered using the Indy SDK by the following id:
+```
+Y6LRXGU3ZCpm7yzjVRSaGu:2:BasicIdentity:1.0.0
+```
+
+Technical details and flows are described in the [AnonCreds](https://hyperledger.github.io/anoncreds-spec/) specification.
+
+#### Positive Consequences
+
+- discoverability: the credential schema or any other resource is discovered using the Indy SDK
+- scalability and longevity: guaranteed by the underlying blockchain technology
+- trust: achieved by the underlying blockchain technology; the transaction with the resource contains the `digest`, `taaDigest`, and `payloadDigest` hashes
+
+#### Negative Consequences
+
+- interoperability: the current solution for storing the resources on-chain is coupled with the Indy blockchain and SDK (this will be mitigated by decoupling the AnonCreds specification from the Indy blockchain)
+- vendor lock: the solution is tightly coupled to the Indy blockchain (this will be mitigated in the future by decoupling the Aries project from the underlying Indy blockchain)
+
+**Note**: there is a new AnonCreds specification that is decoupled from the Hyperledger stack. That specification may describe resource publishing in more detail.
+
+### Trinsic Solution
+
+The Trinsic solution is built on top of the Hyperledger Aries platform on the Indy blockchain.
+The main benefit is the Trinsic application layer that defines the domain models, entities, REST API, and SDK for working with them.
+
+A resource, such as a credential schema, is stored on-chain, but the technical complexity and low-level details are hidden behind `Template` and the [`Template Service`](https://docs.trinsic.id/reference/services/template-service/#template-service)
+
+#### Positive & Negative Consequences
+
+These are similar to the Hyperledger AnonCreds solution.
+
+The main benefit of the Trinsic approach to storing resources is a good abstraction layer, documentation, a REST API, and a variety of supported programming languages in the SDKs for dealing with the underlying resources.
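+
+A recurring trust mechanism across the options above is a published checksum for the resource. As a rough, non-normative illustration (assuming a hex-encoded SHA-256 checksum; real systems must agree on the exact hash algorithm), verifying a fetched resource could look like this TypeScript sketch:
+
+```typescript
+import { createHash } from "crypto";
+
+// Verify that fetched resource bytes match the checksum published in the
+// (DID-linked) resource metadata. Illustrative only: assumes SHA-256, hex.
+function verifyResourceChecksum(resource: Uint8Array, publishedChecksum: string): boolean {
+  const actual = createHash("sha256").update(resource).digest("hex");
+  return actual === publishedChecksum.toLowerCase();
+}
+
+// Usage sketch: compare the downloaded bytes against `metadata.checksum`.
+// const ok = verifyResourceChecksum(bytes, metadata.checksum);
+```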
+
+
+### Solution #1 (W3C with dynamic resource resolution)
+
+The solution for storing the resources linked to the DID depends on two decisions that are described in the Context and Problem Statement:
+
+- where the resource is stored
+- how the resource is discovered and fetched
+
+Taking into account the advantages and disadvantages of the existing solutions, the decision about the solution for the Identus platform might be the following:
+
+- the resource is linked to the DID by the convention specified in the W3C specification, so specifying the resource in the DID URL and defining the service endpoint that exposes the resource allows discovering and fetching the resource using the Universal Resolver
+- as an option, the same resource can be discovered and fetched by the Identus platform backend and SDK without involving the Universal Resolver
+- the resource integrity must be guaranteed by one of the following options:
+  - by signing the payload with one of the DID's keys, or
+  - by publishing the resource metadata that contains the information about the resource (id, type, name, media type, hash) on-chain, or
+  - for a resource smaller than the blockchain limitation (up to 64KB), by publishing the resource together with its hash and/or signature
+- the resource can be stored in cloud storage - a PostgreSQL database - for indexing and the lookup API
+
+As the Identus platform can leverage the Cardano blockchain and there is a strong requirement for longevity and security, the resource, together with its signature and/or hash, must be stored in the Cardano blockchain.
+
+An example of this solution would be the following (with respect to the current infrastructure and services):
+
+- prism-node must be able to store the generic resource payload, signature, and/or hash on-chain and restore the given resource in the underlying database (PostgreSQL) for indexing and the lookup API
+- the credential schema (or any other resource module) must be a part of the Atala SSI infrastructure and must
+  - publish the concrete resource as a generic resource using the prism-node API
+  - expose the API for discovering and fetching the resource by URL
+  - expose the API for managing the resources (create, publish, lookup with pagination)
+- the Universal Resolver for the DID Method must be able to discover and fetch the resource by DID URL
+- if needed, the SDK and backend services can fetch the resources directly (not via the Universal Resolver)
+
+Example:
+
+Given the credential schema with the signature:
+
+```
+{
+  "$id": "driving-license-1.0",
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "description": "Driving License",
+  "type": "object",
+  "properties": {
+    "credentialSubject": {
+      "type": "object",
+      "properties": {
+        "emailAddress": {
+          "type": "string",
+          "format": "email"
+        },
+        "givenName": {
+          "type": "string"
+        },
+        "familyName": {
+          "type": "string"
+        },
+        "dateOfIssuance": {
+          "type": "string",
+          "format": "date-time"
+        },
+        "drivingLicenseID": {
+          "type": "string"
+        },
+        "drivingClass": {
+          "type": "integer"
+        }
+      },
+      "required": [
+        "emailAddress",
+        "familyName",
+        "dateOfIssuance",
+        "drivingLicenseID",
+        "drivingClass"
+      ],
+      "additionalProperties": true
+    }
+  },
+  "proof": {
+    "type": "RsaSignature2018",
+    "created": "2023-04-18T10:30:00Z",
+    "jws": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Imh0dHBzOi8vZXhhbXBsZS5jb20vY3JlZGVudGlhbHMvc3ViamVjdCIsInR5cGUiOiJWZXJpZmljYWxpY0NyZWRlbnRpYWwiLCJpc3N1ZXIiOiJodHRwczovL2V4YW1wbGUuY29tL2lzc3VlciIsImlzc3VlckRhdGUiOiIyMDIzLTA0LTE4VDEwOjMwOjAwWiIsImV4cGlyYXRpb25EYXRlIjoiMjAyNC0wNC0xOFQxMDowOTo1MFoiLCJjcmVkZW50aWFsU3ViamVjdCI6eyJpZCI6Imh0dHBzOi8vZXhhbXBsZS5jb20vY3JlZGVudGlhbHMvc3ViamVjdC9zdWJqZWN0IiwibmFtZSI6IkpvaG4gRG9lIiwic2lnbmF0dXJlIjoxMH19.OesuS6eC0gVh8SZpESM7Z4Yln9sGSsJHQ8s0LlcsD99H6_7U6vukUeT2_GZTtuTf9SwIfdtgViFTOfhzGTyM6oMGEeUJv6Umlh6TQ1fTm9XEDQV7JDBiaxRzV7S_vS6i",
+    "alg": "RS256"
+  }
+}
+```
+
+In order to store it as a general resource on-chain, it should be binary-encoded in CBOR format (or as a Base64-encoded string), and the metadata must be added to it.
+
+For example, it might look like the following JSON object:
+
+```
+{
+  "id": "f3d39687-69f5-4046-a960-3aae86a0d3ca",
+  "name": "DrivingLicense-1.0",
+  "resourceType": "CredentialSchema",
+  "mediaType": "application/json", // MIME Type of the resource
+  "data": "SGVsbG8sIHdvcmxk", // Base64-encoded or CBOR file
+  "did": "did:prism:abcdefg" // the DID reference to link the resource to the DID and create the anchor to the DID
+}
+```
+
+... and published on the Cardano blockchain as a payload of the AtalaOperation object, so it can be retrieved from the blockchain and added to the indexed database for resolution via the REST API.
+
+Given there is an Agent or CredentialSchema service that exposes the REST API for fetching the credential schema by ID (in the current implementation it corresponds to the Cloud Agent `/schema-registry/credential-schema/{uuid}`, but later it might be changed to `/credential-schema/{didRef}/{id}?version={version}`), the services of the Identus platform and the SDKs can resolve the given schema by URL and use the convenient lookup API with filtering and pagination to manage the credential schemas in the Web application.
+
+To define the `schemaId` in the messages of the Issue Credential and Present Proof protocols, the following DID URL can be used:
+
+```
+did:prism:abcdefg1234567890?service=credentialschema&relativeRef=%2Ff3d39687-69f5-4046-a960-3aae86a0d3ca
+```
+
+The version is skipped, as resolving a single resource doesn't require a `version` parameter.
+`f3d39687-69f5-4046-a960-3aae86a0d3ca` is a unique identifier derived from the triple: didRef, id, and version.
+
+So, having the following service endpoint definition in the DID Document:
+
+
+```
+{
+  "service": [
+    {
+      "id": "did:prism:abcdefg#credentialschema",
+      "type": "CredentialSchemaService",
+      "serviceEndpoint": "https://agent.example.com/schema-registry/schemas"
+    }
+  ]
+}
+```
+
+and having the logic for dereferencing the DID URL in the PRISM DID Resolver, any system in the SSI ecosystem can fetch this resource and validate its authorship.
+
+Storing resources larger than 64KB is out of the scope of this ADR.
These must be stored in a slightly different way; for instance, an image of ~10MB can be stored and linked to the DID document in the following way:
+
+- the image is stored in the cloud database in a binary format
+- the metadata and the hash of the image are stored on-chain
+- optionally, the signature of the owner DID can be generated for the payload and stored together with the hash
+- to prove the integrity of the image file, the hash of the binary representation must be the same and/or the signature must be verified
+- the resource can be fetched in the same way as the credential schema from the previous example
+
+#### Positive Consequences
+
+- discoverability: the credential schema or any other resource is discovered using the Universal Resolver
+- scalability: the size of the DID document doesn't grow when a new resource is added; the number of resources is limited by the scalability of the underlying database and the blockchain
+- longevity: for a resource that can be stored on-chain, longevity is fully guaranteed by the underlying blockchain technology
+- trust: achieved by cryptography and the underlying blockchain technology; the transaction with the resource contains the hashes and the signatures that can be verified
+- interoperability: any system that uses the Universal Resolver can fetch the resource by dereferencing the DID URL (following the W3C specification)
+- vendor lock: by publishing the specifications and the algorithms for fetching the data, the resource can be resolved by any other SSI system
+
+#### Negative Consequences
+
+- longevity: for a resource that cannot be stored on-chain because of its large size, longevity is guaranteed by cloud recovery procedures and data backups. As an option for mitigating this problem, the resource can be stored in IPFS (an additional ADR is required for this)
+- vendor lock: the solution is coupled to the Cardano blockchain
+
+**NOTE:** one of the main concerns of this ADR is storing the resources on-chain because of size limitations, throughput, latency, and cost. This option allows postponing that decision and implementing DID-linked resources without the need to store resources on-chain.
+
+---
+
+### Solution #2 (ToIP specification implementation)
+
+The ToIP specification can be used to implement the resource resolution.
+Implementing it requires the following (a sketch of the resulting single-step resolution flow follows the open questions below):
+
+- specify in the DID method the logic for resolving resources from the DID URL
+- specify the service mapping in the DID method and implement the resource resolution logic in the DID resolver
+- add the `didDocumentMetadata.linkedResourceMetadata` field to the DID method and implement the logic in the VDR layer
+- implement the service layer according to the ToIP specification
+
+The ToIP solution specifies requirements for the VDR (blockchain) that are not easy to satisfy with the current implementation of the Identus platform.
+According to this specification, the Universal Resolver must have direct access to the blockchain or use a centralized layer for fetching the resources over a REST API.
+Before implementing this specification in the Identus platform, we need to answer the following questions:
+
+- who hosts the `prism-node` infrastructure for the Universal Resolver, and how is it managed?
+- should we make the PRISM DID Method responsible for the resource resolution logic?
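+
+As a rough, non-normative sketch (assuming the resolver has an indexed view of the VDR, e.g. a prism-node database; all names below are illustrative, not an existing API), the single-step ToIP-style resolution could look like this:
+
+```typescript
+interface ResourceQuery {
+  did: string;
+  resourceName?: string;
+  resourceType?: string;
+  resourceId?: string;
+}
+
+interface VdrIndex {
+  // Indexed lookup over resources published on the VDR.
+  findResource(q: ResourceQuery): Promise<{ mediaType: string; data: Uint8Array } | null>;
+}
+
+// Parse e.g. "did:prism:abc?resourceName=DrivingLicense&resourceType=CredentialSchema"
+function parseResourceDidUrl(didUrl: string): ResourceQuery {
+  const [did, query = ""] = didUrl.split("?");
+  const params = new URLSearchParams(query);
+  return {
+    did,
+    resourceName: params.get("resourceName") ?? undefined,
+    resourceType: params.get("resourceType") ?? undefined,
+    resourceId: params.get("resourceId") ?? undefined,
+  };
+}
+
+async function resolveResource(didUrl: string, index: VdrIndex) {
+  // A single call: the DID document is not resolved separately, because
+  // the index already maps DIDs to their linked resources.
+  const resource = await index.findResource(parseResourceDidUrl(didUrl));
+  if (resource === null) throw new Error("notFound"); // ToIP-style error string
+  return resource;
+}
+```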
+
+#### Positive Consequences
+
+- interoperability: the resource is resolved in a standard way according to the ToIP specification, following the W3C specification for DID URL dereferencing
+- discoverability: the resource defined in the DID URL is resolved and fetched dynamically
+- scalability: compared to the W3C specification, the DID document is not required to fetch the resource, so instead of 2-3 steps (calls), resource resolution should complete in a single step. The behavior must be described in the DID method and implemented by the DID resolver.
+
+#### Negative Consequences
+
+- complexity: the solution is complex and over-engineered. A lot of components and flows must be defined to fetch the resource.
+- specification: the current approach might change as it is still in draft status, so implementing it is risky
+
+## Decision Outcome
+
+Each option has technical challenges and limitations, but it is possible to define the following decisions as an outcome:
+
+- the resource MUST be stored on-chain to guarantee the trust and longevity aspects; for the Identus platform this is the Cardano blockchain
+- the resource SHOULD be indexed for quick lookup over the API
+- the resource CAN be referenced in the DID Document for additional discoverability
+- the resource MUST be dereferenced from the DID URL according to the W3C or ToIP specification and implementation
+- the resource resolution CAN be described in the DID Method (it is not required for dynamic resource linking and the W3C dereferencing algorithm)
+- the complexity of the solution SHOULD be proportionate to the original goal: getting the resource linked to the DID
+- the solution SHOULD be scalable
+- the solution MUST be interoperable and easily adopted by the SSI ecosystem
+
+Solution option #1 is considered a good option, as it satisfies the requirements and the majority of the negative consequences are mitigated.
+The following comparison table is a summary of the available options.
+
+| Option | Simplicity | Trust | Scalability | Interop | Discoverability | Decentralisation |
+|--------------------------------------------|--------------------------------------|--------------------|--------------------------------------|--------------------------------------|--------------------|--------------------|
+| linkedResources field | :heavy_plus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_plus_sign: | N/A |
+| linkedResourceMetadata (Cheqd) | :heavy_minus_sign:/:heavy_plus_sign: | :heavy_check_mark: | :heavy_minus_sign:/:heavy_plus_sign: | :heavy_plus_sign: | :heavy_plus_sign: | :heavy_check_mark: |
+| DID URL Dereferencing (W3C specification) | :heavy_plus_sign: | N/A | :heavy_plus_sign: | :heavy_plus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| DID URL Dereferencing (ToIP specification) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_plus_sign:/:heavy_minus_sign: | :heavy_plus_sign:/:heavy_minus_sign: | :heavy_plus_sign: | :heavy_check_mark: |
+| RootsID - Cardano AnonCreds | :heavy_plus_sign: | :heavy_check_mark: | :heavy_plus_sign:/:heavy_minus_sign: | :heavy_plus_sign: | :heavy_plus_sign: | :heavy_check_mark: |
+| Hyperledger AnonCreds | :heavy_plus_sign: | :heavy_check_mark: | :heavy_plus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| Trinsic | :heavy_minus_sign: | :heavy_check_mark: | :heavy_plus_sign:/:heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| Solution #1 W3C | :heavy_plus_sign: | :heavy_check_mark: | :heavy_plus_sign: | :heavy_plus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| Solution #2 ToIP | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign:/:heavy_plus_sign: | :heavy_plus_sign:/:heavy_minus_sign: | :heavy_plus_sign: | :heavy_check_mark: |
+
+---
+
+Each option reviewed in this ADR is a composition of the following architectural decisions:
+
+- the resource is stored on-chain or off-chain
+- the VDR layer is managed or unmanaged (for instance, leveraging the Blockfrost REST API can simplify the solution, but might be expensive at scale)
+- domestic or official (W3C/ToIP) specification implementation
+- static/dynamic resource discoverability (is the resource metadata stored in the DID Document or not)
+- DID URL dereferencing algorithm and naming convention
+- level of trust: Tx signature, resource hash, resource signature
+- decentralized or SaaS solution
+- SDK, Universal Resolver, or REST API for fetching the resource
+
+The main benefits of option #1 for the Identus platform are the following:
+
+- the resource is stored on-chain
+- the resource is published and indexed by the managed VDR layer (prism-node)
+- the resource is available via REST API & SDK for the product-level applications
+- the resource is dereferenced via the DID URL in the DID resolver
+- the resource is linked to the DID dynamically (using the DID URL + dereferencing algorithm)
+- the solution is scalable and decentralized (anyone can deploy the Identus stack)
+- the level of trust can be guaranteed by the underlying VDR and enforced by hashes or signatures of the resource
+
+
+## Links
+
+- [Our Approach to DID-Linked Resources](https://blog.cheqd.io/our-approach-to-resources-on-ledger-25bf5690c975)
+- [Context for Developing DID-Linked Resources](https://docs.cheqd.io/identity/guides/did-linked-resources/context)
+- [ADR 002: DID-Linked Resources](https://docs.cheqd.io/identity/architecture/adr-list/adr-002-did-linked-resources)
+- [Hyperledger Anoncreds - Schema Publisher: Publish Schema Object](https://hyperledger.github.io/anoncreds-spec/#schema-publisher-publish-schema-object)
+- [ToIP - DID URL Resource Parameter Specification](https://wiki.trustoverip.org/display/HOME/DID+URL+Resource+Parameter+Specification)
+- [ToIP - DID-Linked Resources Specification](https://wiki.trustoverip.org/display/HOME/DID-Linked+Resources+Specification)
+- [DID-Core#did-parameters](https://www.w3.org/TR/did-core/#did-parameters)
+- [DID-Resolution#dereferencing](https://w3c-ccg.github.io/did-resolution/#dereferencing)
+- [RootsID AnonCreds Methods](https://github.com/roots-id/cardano-anoncreds/blob/main/cardano-anoncred-methods.md)
+- [RootsID Cardano AnonCreds repo](https://github.com/roots-id/cardano-anoncreds)
+
diff --git a/documentation/adrs/decisions/20230509-message-routing-for-multi-tenant.md b/documentation/adrs/decisions/20230509-message-routing-for-multi-tenant.md
new file mode 100644
index 000000000..c5f67194c
--- /dev/null
+++ b/documentation/adrs/decisions/20230509-message-routing-for-multi-tenant.md
@@ -0,0 +1,58 @@
+# Routing Requests to the Correct Tenant
+
+- Status: accepted
+- Deciders: Yurii Shynbuiev, David Poltorak, Benjamin Voiturier, Shailesh Patil
+- Date: 2023-05-09
+- Tags: multi-tenant, routing, message
+
+## Context and Problem Statement
+In multi-tenancy, the Cloud Agent is still a single running agent; however, some of the resources are now shared between the tenants of the agent.
+Each tenant has their own keys, DIDs, and connections. Transports and most of the settings are still shared.
+All the API endpoints are the same from the outside.
+
+With multi-tenancy, message routing can be used to ensure that messages are delivered only to the intended recipient or tenant, and not to unauthorized tenants.
+
+Backend services: the Cloud Agent uses PostgreSQL. Authentication and authorization are illustrated in the sequence diagram below:
+    sequenceDiagram
+    autonumber
+    actor H as Holder(DidComm)
+    actor T as Tenant(Issuer)
+    participant A as CloudAgent
+    participant W as Wallet
+    participant DB as Database[did -> tenantId]
+    T->>A: Register Tenant
+    activate A
+    A->>W: Create wallet
+            activate W
+                note over W: Each tenant has its own wallet where keys and DIDs are stored
+                W-->>A: tenantId
+            deactivate W
+            note over T, A: Subsequent requests include JWT header
+            activate DB
+                note over DB: did -> tenantId or did -> walletId
+                T->>A: Create PeerDID[JWT Header]
+                A->>A: validate the token and extract tenantId
+                alt JWT validation
+                    A-->>T: 200 OK & JWT
+                else No user
+                    A-->>T: 401 Unauthorized
+                end
+                T-->>A: if authorised, create PeerDID
+                A-->>DB: Update [DID(PeerDID) -> tenantID]
+                A->>H: send DIDCOMM message to holder did
+            deactivate DB
+    deactivate A
+    activate H
+        H->>A: DIDCOMMV2 message to Agent(did)
+        A-->>DB: lookup Agent DID to identify tenantId
+        A-->>A: decrypt message
+    deactivate H
+
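+
+A minimal TypeScript sketch of the routing step shown above - mapping the recipient DID of an inbound DIDComm message to the owning tenant's wallet via the `did -> tenantId` table (all names are illustrative, not the actual agent API):
+
+```typescript
+interface DidTenantStore {
+  findTenantByDid(did: string): Promise<string | null>; // did -> tenantId lookup
+}
+
+interface Wallet {
+  decrypt(encryptedMessage: Uint8Array): Promise<unknown>;
+}
+
+async function routeInboundMessage(
+  recipientDid: string,
+  encryptedMessage: Uint8Array,
+  store: DidTenantStore,
+  wallets: Map<string, Wallet> // tenantId -> tenant wallet
+): Promise<unknown> {
+  const tenantId = await store.findTenantByDid(recipientDid);
+  if (tenantId === null) throw new Error("unknown recipient DID: cannot route message");
+
+  const wallet = wallets.get(tenantId);
+  if (wallet === undefined) throw new Error(`no wallet found for tenant ${tenantId}`);
+
+  // Only the owning tenant's wallet can decrypt the message.
+  return wallet.decrypt(encryptedMessage);
+}
+```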
+ + -``` - -> Unfortunately, this diagram won't be automatically rendered in your preview mode. -> So, you could debug using github mermaid diagrams, but then integrate the code above in your ADR. +This website is automatically updated after a change on the `main` branch of the project's Git repository. +In fact, the developers manage this documentation directly with markdown files located next to their code, so it is more convenient for them to keep it up-to-date. +You can browse the ADRs by using the left menu or the search bar. ## More information @@ -53,3 +35,4 @@ To successfully render Mermaid diagrams on the server side, add the following co - [Log4brains documentation](https://github.com/thomvaill/log4brains/tree/master#readme) - [What is an ADR and why should you use them](https://github.com/thomvaill/log4brains/tree/master#-what-is-an-adr-and-why-should-you-use-them) - [ADR GitHub organization](https://adr.github.io/) + diff --git a/documentation/adrs/adr.md b/documentation/adrs/adr.md deleted file mode 100644 index 3d53cfd2f..000000000 --- a/documentation/adrs/adr.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -id: adr -title: Architecture knowledge base ---- - - -# Architecture knowledge base - -Welcome 👋 to the architecture knowledge base of the Identus platform. - -You will find here all the Architecture Decision Records (ADR) of the project. - -The introduction of ADRs was approved in [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records) - -Engeering guidance on creating and managing ADRs can be found [here](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3599237263/Architectural+Decision+Records+ADRs) - -## Definition and purpose - -> An Architectural Decision (AD) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant. -> An Architectural Decision Record (ADR) captures a single AD, such as often done when writing personal notes or meeting minutes; the collection of ADRs created and maintained in a project constitutes its decision log. - -An ADR is immutable: only its status can change (i.e. become deprecated or superseded). That way, you can become familiar with the whole project history just by reading its decision log in chronological order. -Moreover, maintaining this documentation aims at: - -- 🚀 Improving and speeding up the onboarding of a new team member -- 🔭 Avoiding blind acceptance/reversal of a past decision (cf [Michael Nygard's famous article on ADRs](https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions.html)) -- 🤝 Formalizing the decision process of the team - -## Usage - -This website is automatically updated after a change on the `main` branch of the project's Git repository. -In fact, the developers manage this documentation directly with markdown files located next to their code, so it is more convenient for them to keep it up-to-date. -You can browse the ADRs by using the left menu or the search bar. 
- -## More information - -- [RFC-0016](https://input-output.atlassian.net/wiki/spaces/ATB/pages/3580559403/RFC+0016+-+Use+Architectural+Design+Records) -- [Engineering Guidance](https://input-output.atlassian.net/wiki/spaces/AV2/pages/3599237263/Architectural+Decision+Records+ADRs) -- [Log4brains documentation](https://github.com/thomvaill/log4brains/tree/master#readme) -- [What is an ADR and why should you use them](https://github.com/thomvaill/log4brains/tree/master#-what-is-an-adr-and-why-should-you-use-them) -- [ADR GitHub organization](https://adr.github.io/) - diff --git a/documentation/adrs/sidebars.js b/documentation/adrs/sidebars.js index 78d05911c..e431adfbf 100644 --- a/documentation/adrs/sidebars.js +++ b/documentation/adrs/sidebars.js @@ -20,7 +20,6 @@ const sidebars = { tutorialsSidebar: [ "README", - "adr", "template", { type: 'category', diff --git a/identus-cloud-agent b/identus-cloud-agent index 758fe87cb..f999f303f 160000 --- a/identus-cloud-agent +++ b/identus-cloud-agent @@ -1 +1 @@ -Subproject commit 758fe87cb3c729b544a1df434c23d535162cbba9 +Subproject commit f999f303f876d97287f217e00bdbc1bbcd193495 diff --git a/identus-edge-agent-sdk-ts b/identus-edge-agent-sdk-ts index 2be9710b3..2afbbc1e3 160000 --- a/identus-edge-agent-sdk-ts +++ b/identus-edge-agent-sdk-ts @@ -1 +1 @@ -Subproject commit 2be9710b3d0f8e8b98bb990f47462c1a88654948 +Subproject commit 2afbbc1e32977cc62a0211189cf9ecb8b593f228 From 658181550b1bf3f2ddadd5f17534bc46c5c6f2fb Mon Sep 17 00:00:00 2001 From: FabioPinheiro Date: Mon, 12 Aug 2024 16:06:23 +0100 Subject: [PATCH 9/9] identus-cloud-agent Signed-off-by: FabioPinheiro --- identus-cloud-agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identus-cloud-agent b/identus-cloud-agent index f999f303f..758fe87cb 160000 --- a/identus-cloud-agent +++ b/identus-cloud-agent @@ -1 +1 @@ -Subproject commit f999f303f876d97287f217e00bdbc1bbcd193495 +Subproject commit 758fe87cb3c729b544a1df434c23d535162cbba9