diff --git a/weaver/core/drivers/corda-driver/gradlew.bat b/weaver/core/drivers/corda-driver/gradlew.bat index 24467a141f..9618d8d960 100644 --- a/weaver/core/drivers/corda-driver/gradlew.bat +++ b/weaver/core/drivers/corda-driver/gradlew.bat @@ -1,100 +1,100 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. 
+set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/weaver/core/network/fabric-interop-cc/README.md b/weaver/core/network/fabric-interop-cc/README.md index 69a625d5fc..e1da96f2c4 100644 --- a/weaver/core/network/fabric-interop-cc/README.md +++ b/weaver/core/network/fabric-interop-cc/README.md @@ -3,61 +3,61 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Fabric Interoperability Contracts - -## Summary - -- The Fabric interoperability contracts handle the dual process of servicing requests for views from external networks, and verifing requested views for integrity. -- These contracts allow networks to define policies that control which external entities can access what objects on the ledger, and policies that determine what constitutes a valid proof. -- These contracts impose no impact on application contracts. 
- -## Installation - -- `make protos-local` - Copy the latest protos directory from main folder and update local copy of fabric-protos directory -- `make build` - Build the chaincode binary -- `make` - `make protos-local` and `make build` -- `make clean` - Deletes the binary - -## Testing - -Run all the tests with: - -`go test` - -(or `go test -v` for verbose logging) - -## Usage - -Once you have built the chaincode (by running `make`), the following command will run the chaincode: - -`CORE_CHAINCODE_LOGGING_LEVEL=debug CORE_PEER_ADDRESS=localhost:7052 CORE_CHAINCODE_ID_NAME=mycc:v0 CORE_PEER_TLS_ENABLED=false ./bin/interop -peer.address localhost:7052` - -With the chaincode process running, you can shell into a local fabric network (see below for sample network) to use the chaincode - -```bash -docker exec -it cli bash -# Since we are not using the installed chaincode, this path can be to any valid chaincode -peer chaincode install -n mycc -v v0 -l golang -p /opt/gopath/src/chaincodedev/chaincode/asset-transfer-basic -peer chaincode list --installed -peer chaincode instantiate -n mycc -v v0 -l golang -c '{"Args":["initLedger","applicationCCID"]}' -C myc -o orderer:7050 -``` - -The chaincode can then be invoked with the following examples: - -```bash -peer chaincode invoke -n mycc -c '{"Args":["GetApplicationID"]}' -C myc -``` - -The chaincode can be used with any Fabric 2.0 network that has a peer running in development mode. However, we have provided a very simple [Fabric network](https://github.com/airvin/fabric-network/tree/fabric-2) for testing purposes. If you would like to use this network, start the Fabric network with the peer in development mode and without a chaincode container. This can be done with the `./start-no-cc.sh` script. - -## Servicing a Remote View Request - -Describe process of handling a remote view request. - - - -## Verifying a Requested View - -Describe process of requesting and verifying a remote view. 
- - +# Fabric Interoperability Contracts + +## Summary + +- The Fabric interoperability contracts handle the dual process of servicing requests for views from external networks, and verifying requested views for integrity. +- These contracts allow networks to define policies that control which external entities can access what objects on the ledger, and policies that determine what constitutes a valid proof. +- These contracts impose no impact on application contracts. + +## Installation + +- `make protos-local` - Copy the latest protos directory from main folder and update local copy of fabric-protos directory +- `make build` - Build the chaincode binary +- `make` - `make protos-local` and `make build` +- `make clean` - Deletes the binary + +## Testing + +Run all the tests with: + +`go test` + +(or `go test -v` for verbose logging) + +## Usage + +Once you have built the chaincode (by running `make`), the following command will run the chaincode: + +`CORE_CHAINCODE_LOGGING_LEVEL=debug CORE_PEER_ADDRESS=localhost:7052 CORE_CHAINCODE_ID_NAME=mycc:v0 CORE_PEER_TLS_ENABLED=false ./bin/interop -peer.address localhost:7052` + +With the chaincode process running, you can shell into a local fabric network (see below for sample network) to use the chaincode + +```bash +docker exec -it cli bash +# Since we are not using the installed chaincode, this path can be to any valid chaincode +peer chaincode install -n mycc -v v0 -l golang -p /opt/gopath/src/chaincodedev/chaincode/asset-transfer-basic +peer chaincode list --installed +peer chaincode instantiate -n mycc -v v0 -l golang -c '{"Args":["initLedger","applicationCCID"]}' -C myc -o orderer:7050 +``` + +The chaincode can then be invoked with the following examples: + +```bash +peer chaincode invoke -n mycc -c '{"Args":["GetApplicationID"]}' -C myc +``` + +The chaincode can be used with any Fabric 2.0 network that has a peer running in development mode. 
However, we have provided a very simple [Fabric network](https://github.com/airvin/fabric-network/tree/fabric-2) for testing purposes. If you would like to use this network, start the Fabric network with the peer in development mode and without a chaincode container. This can be done with the `./start-no-cc.sh` script. + +## Servicing a Remote View Request + +Describe process of handling a remote view request. + + + +## Verifying a Requested View + +Describe process of requesting and verifying a remote view. + + diff --git a/weaver/docs/docs/external/architecture-and-design/decentralized-identity.md b/weaver/docs/docs/external/architecture-and-design/decentralized-identity.md index 385b78158f..a56eab5de2 100644 --- a/weaver/docs/docs/external/architecture-and-design/decentralized-identity.md +++ b/weaver/docs/docs/external/architecture-and-design/decentralized-identity.md @@ -1,7 +1,7 @@ ---- -id: decentralized-identity -title: Decentralized Identity ---- +--- +id: decentralized-identity +title: Decentralized Identity +--- -Interoperation for asset or data transfers/exchanges relies on a message-passing infratructure and pan-network data processing modules, as we have seen in earlier pages. But there is yet another crucial basis these data processing modules need to satisfy our design principles of network independence and avoidance of trust parties. This is the ability of a network as a whole and of its individual members to accurately identify and authenticate another network's members. - -Further, for the networks to remain independent and interact ad hoc with each other, we cannot impose a central authority that unifies their private identity management systems. So the identity basis of interoperation must be decentralized, leading inevitably to the requirement of exchanging identity information across networks as a pre-requisite for asset and data transfers/exchanges. 
This is illustrated in the figure below where interoperation protocols are classified in two planes (or tiers), data and identity, with the former depending on the latter. - -![alt text](../../../static/architecture-assets/identity-data-planes.jpg) - -- In the __data plane__ lies the protocols that effect the actual exchanges of data and assets. The figure above illustrates a typical data-sharing instance, where the network at the left requests a data record from the right. The latter receives the request via the two relays (not explicitly marked in this diagram) and runs an access control check through consensus in its _interop module_ before responding with the data record and supporting proof. The network at the left receives the data and proof, again via the two relays, and verifies the data using the supplied proof. __Note: since a core part of both request and proof are digital signatures of network members, the ability to identify and authenticate network members is necessary to perform these endpoint functions__. -- Here is where the __identity plane__ enters the picture, as a trust anchor for the data plane. The most general function of this plane is illustrated in the figure, where the networks get each others' identity and configuration (i.e., membership structure and network topology) information. This exchange has as its own trust basis (or dependency) a set of identity providers and verifiers. (_Note_: these identity providers and verifiers may belong to the two networks or they could be external entities.) The outcome of the exchange is a record of the other network's identity and configuration information on one's ledger, which can then be looked up in a data plane protocol instance. - -### Identity Plane: Strawman Approach - -The simplest identity plane protocol involves a direct exchange of identity information between representatives of the two networks: in other words, an API integration. 
But this approach suffers from the same drawbacks that API integration in the data plane would. It diminishes a blockchain network to a single trusted spokesperson, exposing that network to risk. Even if such a spokesperson could be designated, appropriately framing access control policies for potentially every other blockchain network in the world would be very challenging. This approach is therefore insecure and not scalable, and therefore ought to be treated purely as a strawman. - -### Networks as Groups of Self-Sovereign Members - -A secure and sustainable identity plane plaform can be built on the basis of _self-sovereign identity_ and _decentralized identifiers_. We recognize that: -- Each constituent member of a blockchain network may already possess an identity from a third-party provider -- Membership within a network can be treated as a property of a sovereign organization rather than subordination to a network's governing authority -- DIDs allow members to control who they wish to share their identities with and the levels of exposure -- Network membership lists and individual members' identities can respectively be validated by different providers, thereby maintaining decentralization of infrastructure - -### Distributed Identity Management Infrastructure - -The distributed identity management infrastructure for interoperation is illustrated in the figure below. We assume the existence of one or more _Interoperation Identity Networks (IINs)_ that act as registries and credential validators for the organizations that form the memberships of blockchain networks. - -![alt text](../../../static/architecture-assets/decentralized-id-mgmt-arch.jpg) - -An IIN can be built from scratch to facilitate blockchain interoperation, but it can also be an augmentation of an existing decentralized identity provider or registry. 
Its purpose is to maintain identity records and credentials for organizations and validate these to third parties as per the desire of the identity or credential owner. In this model, an IIN can itself be reputed or it can bring together many reputed and trusted identity providers (called _stewards_) into a shared network. As long as the members of two blockchain networks have shared trust in one or more IINs, an identity plane exchange can be effected, thereby setting the foundation for data and asset transfers. - -#### Interoperation Identity Network - -The ideal IIN architecture is illustrated in the figure below, inspired by Hyperleder Indy, whose architecture is used in our canonical (or reference) implementation. Built on a DLT itself, an Indy-based IIN can provide the combination of assurance and decentralization that a centralized registry cannot. Further, such an IIN will support the entire SSI and DID standard, maintaining credential schemas and verification keys, and issuing _verifiable credentials_ that can be used for privacy-preserving authentications. - -![alt text](../../../static/architecture-assets/iin.jpg) - -An IIN is modeled as a network with a distributed shared ledger, implemented using an Indy Node Pool and running BFT consensus. The ledger is also (ideally) meant to be publicly accessible, though there is nothing preventing our protocols from working with a private IIN. - -A canonical IIN maintains the following: -- DID records corresponding to organizations that are members of one or more blockchain networks, whose salient attributes include: - * Unique (within an IIN) identifier or _verinym_ for the identity owner - * Service endpoint for the identity owner - * Credential schemas - * Credential definitions (public keys used to authenticate signed credentials) - -Every IIN has a set of bootstrapped _trust anchors_ called _stewards_. A steward can create other trust anchors by issuing them suitable credentials. 
The trust anchors are the primary identity providers in our distributed identity management architecture. They can be existing reputed authorities or identity providers who are trusted to certify blockchain networks' memberships, or they can be created ad hoc by a consortium representing the members of a blockchain network. - -For one blockchain network to discover and validate another in the identity plane, it must trust one or more IINs and trust anchors who can provide that validation. We envision a shared and mutually reinforcing trust among stewards and other anchors within an IIN. An anchor could gain trust by virtue of joining a well-established IIN. Similarly, an IIN bootstrapped with well-known stewards gains trust because of the collective reputations of those stewards. - -Examples of entities that can act as stewards or trust anchors within IINs: the Sovrin Foundation (an organization dedicated to enabling open-source digital ID management, and which also maintains Indy), companies like Maersk or Walmart that have founded real-world blockchain networks, companies like IBM or R3 that maintain popular DLT platforms. - -IINs don't have to be built on Indy. Alternatives like Sidetree exist, providing similar functionality. There are also various existing DID registries that are already issuing credentials to existing organizations. To the extent possible, we would like to leverage all of this existing infrastructure and not force any network menmber to find yet another identity provider. Therefore, these existing registries or networks can be used as IINs: the only requirement is that they follow the standards for SSI and DIDs and issuing VCs. - -#### Network Membership Credentials - -Two kinds of credentials (each with a schema and a definition) are maintained on an IIN ledger: -1. __Member list__: This contains a network name or ID and a set of DIDs of that network's members. 
- * This is a per-network credential whose schema and verification key must be maintained on an IIN. - * This is issued by a steward or trust anchor in an IIN and is associated with that steward's or anchor's DID. -2. __Membership__: This contains an oranization's attributes, including the set of IDs of networks to which it belongs. - * This is designed to be an extensible credential, i.e., support more attributes in the future. - * An existing membership credential (of the VC form) can be used as long as it matches the schema recorded on an IIN. - * The issuer must be a steward or trust anchor (or equivalent, if it's a non-Indy registry) in an IIN. - * This is associated with the member's DID. - -#### Identity Info: Units of Exchange - -The IIN is used to discover the membership list of a foreign network and establish the authenticity of its members. Memnbership credentials are independent of blockchain networks. - -But data plane transfers and exchanges require knowledge of in-network identities and certificates, which are issued by a network's membership manager(s) to peers and clients. These are not shared through IINs for several reasons. First, the volume of this information can be quite high and further it is subject to change based on a network's internal needs. Also, a network or its members may not wish to expose all this information to an IIN, which is designed to be publicly accessible. Therefore, it is infeasible or undesirable to shared network-specific credentials via established IINs. Instead, we will enable the _peer-to-peer_ exchange of such credentials after the membership discovery and validation procedure is complete. - -Specifically, the identity information for a network member consists of the set of certificate chains of the membership managers for that particular member (organization). These consist of one or more hierarchies of root and intermediate CA certificates. 
For example: -- In Fabric, each organization uses one or more MSPs (_membership service providers_), each running one or more root and intermediate Fabric-CA servers. Any network peer belonging to an organization is issued a certificate authorized by one of these certificate chains. To authenticate a network peer or client in a data plane protocol, knowledge of these certificate chains is essential. -- In Corda, the entire network typically consists of a hierarchy of CAs, from a root to multiple _doormen_, and from each doorman to multiple nodes. Finally, the certificates used to sign transactions are issued by the node CAs. Therefore, knowledge of the root, doormen, and node CA certificates is essential for authenticating signers. - -More generally, each unit of exchange corresponding to a network member is a _Security Group_, so-called because each network member is an independent organization in its own right with a security domain. - -#### IIN Agents as Member Representatives - -Every network member needs a proxy (either an abstraction or a separate module) for communication with IINs and with the members of foreign networks in the identity plane. We use the term "IIN Agent" to refer to this proxy, and illustrate its functioning as a module within a Fabric network through the reference diagram below. - -![alt text](../../../static/architecture-assets/iin-augmented-network.jpg) - -In the reference implementation, IIN Agents are built as Hyperledger Aries nodes and communicate with each other and with IIN stewards and trust anchors using the Aries protocol. (IIN stewards and trust anchors are also built as Aries nodes.) - -The list of trusted IINs is recorded on the local network's shared ledger, as illustrated in the figure (and thereby agreed through network consensus). 
To be able to interoperate with another network, the latter's members must have identity records maintained by sume subset of these trusted IINs and their VCs must be issued by these IINs stewards and trust anchors. - -#### Protocols - -Let us consider a scenario where _NETWORK 1_ and _NETWORK 2_ wish to interoperate, and their respective member organizations are as follows: -- _NETWORK 1_: Org3, Org4, Org5 -- _NETWORK 2_: Org1, Org2 - -Each network discovers the other's member list and obtains and records ech member's security group to the local shared ledger. We can enumerate these as follows: -- _NETWORK 1_ discovers and registers _NETWORK 2_:Org1 -- _NETWORK 1_ discovers and registers _NETWORK 2_:Org2 -- _NETWORK 2_ discovers and registers _NETWORK 1_:Org3 -- _NETWORK 2_ discovers and registers _NETWORK 1_:Org4 -- _NETWORK 2_ discovers and registers _NETWORK 1_:Org5 - -Each of these can be done in parallel and each discovery and registration operation is idempotent as long as the security group of a network member does not change. - -The high-level workflow for discovery and registration is illustrated below (using _NETWORK 2_ as the seeker and _NETWORK 1_ as the provider). - -![alt text](../../../static/architecture-assets/protocol-identity-overview.jpg) - -(_Note_: "Network unit" is synonymous with "network member") - -Prerequisites for this process are: -- The member list credential of _NETWORK 1_ is provided by a steward or trust anchor in a particular IIN which is also on the trust list recorded in the ledger of _NETWORK 2_. -- The membership credentials for both organizations in _NETWORK 1_ are supplied by one or more IINs that are on the trust list of _NETWORK 2_. -- Each of the 5 organizations (2 in _NETWORK 1_ and 3 in _NETWORK 2_) has an IIN Agent running on their behalf. - -Let us take the example of _NETWORK 2_ having already obtained the security group info for Org4 and Org5 in _NETWORK 1_. It is now discovering and registering _NETWORK 1_:Org3. 
We assume that there is a single IIN with a single Steward for validating member list as well as membership credentials for all members of both the networks. - -_Note_: we assume here for simplicity that a steward as a reputed identity provider has a mechanism to validate the bona fides of an orgganization and its membership in a given network. There are other techniques involving group signatures that could be applied to corroborate an organization's claim to network membership rather than requiring a steward to use an out-of-band security mechanism, but that is presently beyond the scope of this design. - -The discovery and registration procedure steps are as follows: -- The IIN Agent for Org3 registers its membership to _NETWORK 1_ at the Steward in IIN: - * _NETWORK 1_:Org3 gets a DID (verinym) issued - * The Steward updates the member list credential for _NETWORK 1_ to include Org3 - * Org3 obtains a membership credential from Steward -- The IIN Agent for Org3 issues itself a self-signed VC containing its security group info -- The IIN Agent for _NETWORK 2_:Org2 (only one organization in the network needs to do this) obtains the new member list credential from Steward in IIN and validates it using the IIN ledger records -- The IIN Agent for _NETWORK 2_:Org2 discovers that Org3 is a member of _NETWORK 1_, fetches Org3's membership credential from Org3's IIN Agent, and validates it using the IIN ledger records -- The IIN agent for _NETWORK 2_:Org2 fetches the self-signed security group credential from the IIN agent of _NETWORK 1_:Org3 and validates it -- The IIN agent for _NETWORK 2_:Org2 triggers a flow among the IIN Agents of _NETWORK 2_ to collect signatures endorsing the security group info for _NETWORK 1_:Org3 fetched above - * The IIN Agent for _NETWORK 2_:Org1 gets this endorsement request, and validates both the membership credential and the security group info for _NETWORK 1_:Org3 by communicating with the Steward, the IIN ledger, and the IIN Agent for 
_NETWORK 1_:Org3 - * The IIN Agent for _NETWORK 2_:Org1 signs the request from Org2 containing the security group info for _NETWORK 1_:Org3 after the above check succeeds -- When the IIN agent for _NETWORK 2_:Org2 gets signatures from the IIN Agents representing each member of _NETWORK 2_, it submits the security group info for _NETWORK 1_:Org3 alon with the signatures to the _interop module_ (typically smart contract) for recording on the ledger of _NETWORK 2_ - * Now the ledger of _NETWORK 2_ contains the identities and certificates of all three members of _NETWORK 1_: Org3,Org4,Org5, and data plane interoperation may ensue. - -_Note_: the last step above (recording to the local ledger via the _interop module_) may be performed by IIN Agents of both Org1 and Org2 as they have no means of synchronizing their actions, but this recording will be idempotent and hence not cause any harm. - -The process above is illustrated with a few more details in the sequence of protocol diagrams below. - -![alt text](../../../static/architecture-assets/protocol-registration-phase.jpg) - -![alt text](../../../static/architecture-assets/protocol-get-info-phase.jpg) - -![alt text](../../../static/architecture-assets/protocol-update-info-phase.jpg) +Interoperation for asset or data transfers/exchanges relies on a message-passing infrastructure and pan-network data processing modules, as we have seen in earlier pages. But there is yet another crucial basis these data processing modules need to satisfy our design principles of network independence and avoidance of trusted parties. This is the ability of a network as a whole and of its individual members to accurately identify and authenticate another network's members. + +Further, for the networks to remain independent and interact ad hoc with each other, we cannot impose a central authority that unifies their private identity management systems. 
So the identity basis of interoperation must be decentralized, leading inevitably to the requirement of exchanging identity information across networks as a pre-requisite for asset and data transfers/exchanges. This is illustrated in the figure below where interoperation protocols are classified in two planes (or tiers), data and identity, with the former depending on the latter. + +![alt text](../../../static/architecture-assets/identity-data-planes.jpg) + +- In the __data plane__ lies the protocols that effect the actual exchanges of data and assets. The figure above illustrates a typical data-sharing instance, where the network at the left requests a data record from the right. The latter receives the request via the two relays (not explicitly marked in this diagram) and runs an access control check through consensus in its _interop module_ before responding with the data record and supporting proof. The network at the left receives the data and proof, again via the two relays, and verifies the data using the supplied proof. __Note: since a core part of both request and proof are digital signatures of network members, the ability to identify and authenticate network members is necessary to perform these endpoint functions__. +- Here is where the __identity plane__ enters the picture, as a trust anchor for the data plane. The most general function of this plane is illustrated in the figure, where the networks get each others' identity and configuration (i.e., membership structure and network topology) information. This exchange has as its own trust basis (or dependency) a set of identity providers and verifiers. (_Note_: these identity providers and verifiers may belong to the two networks or they could be external entities.) The outcome of the exchange is a record of the other network's identity and configuration information on one's ledger, which can then be looked up in a data plane protocol instance. 
+ +### Identity Plane: Strawman Approach + +The simplest identity plane protocol involves a direct exchange of identity information between representatives of the two networks: in other words, an API integration. But this approach suffers from the same drawbacks that API integration in the data plane would. It diminishes a blockchain network to a single trusted spokesperson, exposing that network to risk. Even if such a spokesperson could be designated, appropriately framing access control policies for potentially every other blockchain network in the world would be very challenging. This approach is therefore insecure and not scalable, and therefore ought to be treated purely as a strawman. + +### Networks as Groups of Self-Sovereign Members + +A secure and sustainable identity plane platform can be built on the basis of _self-sovereign identity_ and _decentralized identifiers_. We recognize that: +- Each constituent member of a blockchain network may already possess an identity from a third-party provider +- Membership within a network can be treated as a property of a sovereign organization rather than subordination to a network's governing authority +- DIDs allow members to control who they wish to share their identities with and the levels of exposure +- Network membership lists and individual members' identities can respectively be validated by different providers, thereby maintaining decentralization of infrastructure + +### Distributed Identity Management Infrastructure + +The distributed identity management infrastructure for interoperation is illustrated in the figure below. We assume the existence of one or more _Interoperation Identity Networks (IINs)_ that act as registries and credential validators for the organizations that form the memberships of blockchain networks. 
+ +![alt text](../../../static/architecture-assets/decentralized-id-mgmt-arch.jpg) + +An IIN can be built from scratch to facilitate blockchain interoperation, but it can also be an augmentation of an existing decentralized identity provider or registry. Its purpose is to maintain identity records and credentials for organizations and validate these to third parties as per the desire of the identity or credential owner. In this model, an IIN can itself be reputed or it can bring together many reputed and trusted identity providers (called _stewards_) into a shared network. As long as the members of two blockchain networks have shared trust in one or more IINs, an identity plane exchange can be effected, thereby setting the foundation for data and asset transfers. + +#### Interoperation Identity Network + +The ideal IIN architecture is illustrated in the figure below, inspired by Hyperledger Indy, whose architecture is used in our canonical (or reference) implementation. Built on a DLT itself, an Indy-based IIN can provide the combination of assurance and decentralization that a centralized registry cannot. Further, such an IIN will support the entire SSI and DID standard, maintaining credential schemas and verification keys, and issuing _verifiable credentials_ that can be used for privacy-preserving authentications. + +![alt text](../../../static/architecture-assets/iin.jpg) + +An IIN is modeled as a network with a distributed shared ledger, implemented using an Indy Node Pool and running BFT consensus. The ledger is also (ideally) meant to be publicly accessible, though there is nothing preventing our protocols from working with a private IIN. 
+ +A canonical IIN maintains the following: +- DID records corresponding to organizations that are members of one or more blockchain networks, whose salient attributes include: + * Unique (within an IIN) identifier or _verinym_ for the identity owner + * Service endpoint for the identity owner + * Credential schemas + * Credential definitions (public keys used to authenticate signed credentials) + +Every IIN has a set of bootstrapped _trust anchors_ called _stewards_. A steward can create other trust anchors by issuing them suitable credentials. The trust anchors are the primary identity providers in our distributed identity management architecture. They can be existing reputed authorities or identity providers who are trusted to certify blockchain networks' memberships, or they can be created ad hoc by a consortium representing the members of a blockchain network. + +For one blockchain network to discover and validate another in the identity plane, it must trust one or more IINs and trust anchors who can provide that validation. We envision a shared and mutually reinforcing trust among stewards and other anchors within an IIN. An anchor could gain trust by virtue of joining a well-established IIN. Similarly, an IIN bootstrapped with well-known stewards gains trust because of the collective reputations of those stewards. + +Examples of entities that can act as stewards or trust anchors within IINs: the Sovrin Foundation (an organization dedicated to enabling open-source digital ID management, and which also maintains Indy), companies like Maersk or Walmart that have founded real-world blockchain networks, companies like IBM or R3 that maintain popular DLT platforms. + +IINs don't have to be built on Indy. Alternatives like Sidetree exist, providing similar functionality. There are also various existing DID registries that are already issuing credentials to existing organizations. 
To the extent possible, we would like to leverage all of this existing infrastructure and not force any network member to find yet another identity provider. Therefore, these existing registries or networks can be used as IINs: the only requirement is that they follow the standards for SSI and DIDs and issuing VCs. + +#### Network Membership Credentials + +Two kinds of credentials (each with a schema and a definition) are maintained on an IIN ledger: +1. __Member list__: This contains a network name or ID and a set of DIDs of that network's members. + * This is a per-network credential whose schema and verification key must be maintained on an IIN. + * This is issued by a steward or trust anchor in an IIN and is associated with that steward's or anchor's DID. +2. __Membership__: This contains an organization's attributes, including the set of IDs of networks to which it belongs. + * This is designed to be an extensible credential, i.e., support more attributes in the future. + * An existing membership credential (of the VC form) can be used as long as it matches the schema recorded on an IIN. + * The issuer must be a steward or trust anchor (or equivalent, if it's a non-Indy registry) in an IIN. + * This is associated with the member's DID. + +#### Identity Info: Units of Exchange + +The IIN is used to discover the membership list of a foreign network and establish the authenticity of its members. Membership credentials are independent of blockchain networks. + +But data plane transfers and exchanges require knowledge of in-network identities and certificates, which are issued by a network's membership manager(s) to peers and clients. These are not shared through IINs for several reasons. First, the volume of this information can be quite high and further it is subject to change based on a network's internal needs. Also, a network or its members may not wish to expose all this information to an IIN, which is designed to be publicly accessible. 
Therefore, it is infeasible or undesirable to share network-specific credentials via established IINs. Instead, we will enable the _peer-to-peer_ exchange of such credentials after the membership discovery and validation procedure is complete. + +Specifically, the identity information for a network member consists of the set of certificate chains of the membership managers for that particular member (organization). These consist of one or more hierarchies of root and intermediate CA certificates. For example: +- In Fabric, each organization uses one or more MSPs (_membership service providers_), each running one or more root and intermediate Fabric-CA servers. Any network peer belonging to an organization is issued a certificate authorized by one of these certificate chains. To authenticate a network peer or client in a data plane protocol, knowledge of these certificate chains is essential. +- In Corda, the entire network typically consists of a hierarchy of CAs, from a root to multiple _doormen_, and from each doorman to multiple nodes. Finally, the certificates used to sign transactions are issued by the node CAs. Therefore, knowledge of the root, doormen, and node CA certificates is essential for authenticating signers. + +More generally, each unit of exchange corresponding to a network member is a _Security Group_, so-called because each network member is an independent organization in its own right with a security domain. + +#### IIN Agents as Member Representatives + +Every network member needs a proxy (either an abstraction or a separate module) for communication with IINs and with the members of foreign networks in the identity plane. We use the term "IIN Agent" to refer to this proxy, and illustrate its functioning as a module within a Fabric network through the reference diagram below. 
+ +![alt text](../../../static/architecture-assets/iin-augmented-network.jpg) + +In the reference implementation, IIN Agents are built as Hyperledger Aries nodes and communicate with each other and with IIN stewards and trust anchors using the Aries protocol. (IIN stewards and trust anchors are also built as Aries nodes.) + +The list of trusted IINs is recorded on the local network's shared ledger, as illustrated in the figure (and thereby agreed through network consensus). To be able to interoperate with another network, the latter's members must have identity records maintained by some subset of these trusted IINs and their VCs must be issued by these IINs' stewards and trust anchors. + +#### Protocols + +Let us consider a scenario where _NETWORK 1_ and _NETWORK 2_ wish to interoperate, and their respective member organizations are as follows: +- _NETWORK 1_: Org3, Org4, Org5 +- _NETWORK 2_: Org1, Org2 + +Each network discovers the other's member list and obtains and records each member's security group to the local shared ledger. We can enumerate these as follows: +- _NETWORK 1_ discovers and registers _NETWORK 2_:Org1 +- _NETWORK 1_ discovers and registers _NETWORK 2_:Org2 +- _NETWORK 2_ discovers and registers _NETWORK 1_:Org3 +- _NETWORK 2_ discovers and registers _NETWORK 1_:Org4 +- _NETWORK 2_ discovers and registers _NETWORK 1_:Org5 + +Each of these can be done in parallel and each discovery and registration operation is idempotent as long as the security group of a network member does not change. + +The high-level workflow for discovery and registration is illustrated below (using _NETWORK 2_ as the seeker and _NETWORK 1_ as the provider). 
+ +![alt text](../../../static/architecture-assets/protocol-identity-overview.jpg) + +(_Note_: "Network unit" is synonymous with "network member") + +Prerequisites for this process are: +- The member list credential of _NETWORK 1_ is provided by a steward or trust anchor in a particular IIN which is also on the trust list recorded in the ledger of _NETWORK 2_. +- The membership credentials for all organizations in _NETWORK 1_ are supplied by one or more IINs that are on the trust list of _NETWORK 2_. +- Each of the 5 organizations (3 in _NETWORK 1_ and 2 in _NETWORK 2_) has an IIN Agent running on their behalf. + +Let us take the example of _NETWORK 2_ having already obtained the security group info for Org4 and Org5 in _NETWORK 1_. It is now discovering and registering _NETWORK 1_:Org3. We assume that there is a single IIN with a single Steward for validating member list as well as membership credentials for all members of both the networks. + +_Note_: we assume here for simplicity that a steward as a reputed identity provider has a mechanism to validate the bona fides of an organization and its membership in a given network. There are other techniques involving group signatures that could be applied to corroborate an organization's claim to network membership rather than requiring a steward to use an out-of-band security mechanism, but that is presently beyond the scope of this design. 
+ +The discovery and registration procedure steps are as follows: +- The IIN Agent for Org3 registers its membership to _NETWORK 1_ at the Steward in IIN: + * _NETWORK 1_:Org3 gets a DID (verinym) issued + * The Steward updates the member list credential for _NETWORK 1_ to include Org3 + * Org3 obtains a membership credential from Steward +- The IIN Agent for Org3 issues itself a self-signed VC containing its security group info +- The IIN Agent for _NETWORK 2_:Org2 (only one organization in the network needs to do this) obtains the new member list credential from Steward in IIN and validates it using the IIN ledger records +- The IIN Agent for _NETWORK 2_:Org2 discovers that Org3 is a member of _NETWORK 1_, fetches Org3's membership credential from Org3's IIN Agent, and validates it using the IIN ledger records +- The IIN agent for _NETWORK 2_:Org2 fetches the self-signed security group credential from the IIN agent of _NETWORK 1_:Org3 and validates it +- The IIN agent for _NETWORK 2_:Org2 triggers a flow among the IIN Agents of _NETWORK 2_ to collect signatures endorsing the security group info for _NETWORK 1_:Org3 fetched above + * The IIN Agent for _NETWORK 2_:Org1 gets this endorsement request, and validates both the membership credential and the security group info for _NETWORK 1_:Org3 by communicating with the Steward, the IIN ledger, and the IIN Agent for _NETWORK 1_:Org3 + * The IIN Agent for _NETWORK 2_:Org1 signs the request from Org2 containing the security group info for _NETWORK 1_:Org3 after the above check succeeds +- When the IIN agent for _NETWORK 2_:Org2 gets signatures from the IIN Agents representing each member of _NETWORK 2_, it submits the security group info for _NETWORK 1_:Org3 along with the signatures to the _interop module_ (typically smart contract) for recording on the ledger of _NETWORK 2_ + * Now the ledger of _NETWORK 2_ contains the identities and certificates of all three members of _NETWORK 1_: Org3,Org4,Org5, and data plane 
interoperation may ensue. + +_Note_: the last step above (recording to the local ledger via the _interop module_) may be performed by IIN Agents of both Org1 and Org2 as they have no means of synchronizing their actions, but this recording will be idempotent and hence not cause any harm. + +The process above is illustrated with a few more details in the sequence of protocol diagrams below. + +![alt text](../../../static/architecture-assets/protocol-registration-phase.jpg) + +![alt text](../../../static/architecture-assets/protocol-get-info-phase.jpg) + +![alt text](../../../static/architecture-assets/protocol-update-info-phase.jpg) ### References Bishakh Chandra Ghosh, Venkatraman Ramakrishna, Chander Govindarajan, Dushyant Behl, Dileban Karunamoorthy, Ermyas Abebe, Sandip Chakraborty, [Decentralized Cross-Network Identity Management for Blockchain Interoperation](https://arxiv.org/abs/2104.03277), _ICBC 2021_ diff --git a/weaver/docs/docs/external/architecture-and-design/drivers.md b/weaver/docs/docs/external/architecture-and-design/drivers.md index a4bc9e65fb..a6f55f7b6e 100644 --- a/weaver/docs/docs/external/architecture-and-design/drivers.md +++ b/weaver/docs/docs/external/architecture-and-design/drivers.md @@ -1,7 +1,7 @@ ---- -id: drivers -title: Drivers ---- +--- +id: drivers +title: Drivers +--- -The driver is responsible for all communication between the relay and its network. In the previous sections we have thought about the driver as a component of the relay. We have done this because conceptually it makes sense to think about it like that. However, in our reference implementation we have made it a seperate process which communicates with the relay via gRPC, as shown below. There are two main reasons for this: - -1. There must exist a different driver for each network type (e.g. Fabric, Corda etc.) and therefore having the driver as a seperate process makes it easy to "plug" different drivers into the relay. -1. 
A possible use case of the relay is that a single relay instance may have multiple drivers (e.g. if multiple entities in the network want to run their own driver). In this case, this plugin style approach of drivers makes it possible to do without having to modify code for each configuration. - -![](/architecture-assets/driver_architecture.png) +The driver is responsible for all communication between the relay and its network. In the previous sections we have thought about the driver as a component of the relay. We have done this because conceptually it makes sense to think about it like that. However, in our reference implementation we have made it a seperate process which communicates with the relay via gRPC, as shown below. There are two main reasons for this: + +1. There must exist a different driver for each network type (e.g. Fabric, Corda etc.) and therefore having the driver as a seperate process makes it easy to "plug" different drivers into the relay. +1. A possible use case of the relay is that a single relay instance may have multiple drivers (e.g. if multiple entities in the network want to run their own driver). In this case, this plugin style approach of drivers makes it possible to do without having to modify code for each configuration. + +![](/architecture-assets/driver_architecture.png) diff --git a/weaver/docs/docs/external/architecture-and-design/relay.md b/weaver/docs/docs/external/architecture-and-design/relay.md index 4dca1ea355..07f648fe49 100644 --- a/weaver/docs/docs/external/architecture-and-design/relay.md +++ b/weaver/docs/docs/external/architecture-and-design/relay.md @@ -1,7 +1,7 @@ ---- -id: relay -title: Relay ---- +--- +id: relay +title: Relay +--- -![](/architecture-assets/relay_architecture.png) - -As mentioned in the overview, relays facilitate communication of protocols between networks. 
To do this, they are composed of three main pieces: - -- `Relay service` - A gRPC server that listens for and handles incoming requests from other relays. For example, a remote network requesting state. -- `App service` - A gRPC server that listens for and handles requests from applications that are requesting an asset from a remote network. -- `Driver` - The driver is responsible for all communication between the relay and its network. The driver is described in more detail in [drivers](./drivers.md). - -The diagram below shows an example communication between two networks, A and B, where network A is requesting state from network B. - -![](/architecture-assets/relay_flow.png) - -1. An application sends a request to their networks relay over gRPC -1. The local relay inspects the query within the request and uses the relevant information to forward the request to the correct remote relay -1. The remote relay's driver interprets the query and invokes the smart contract for the query -1. Once network B has returned a response to its relay, the relay forwards the response back to relay A -1. The application gets the response from the relay, this can either be via a push or pull mechanism -1. The application invokes a domain specific smart contract to process the response from network B +![](/architecture-assets/relay_architecture.png) + +As mentioned in the overview, relays facilitate communication of protocols between networks. To do this, they are composed of three main pieces: + +- `Relay service` - A gRPC server that listens for and handles incoming requests from other relays. For example, a remote network requesting state. +- `App service` - A gRPC server that listens for and handles requests from applications that are requesting an asset from a remote network. +- `Driver` - The driver is responsible for all communication between the relay and its network. The driver is described in more detail in [drivers](./drivers.md). 
+ +The diagram below shows an example communication between two networks, A and B, where network A is requesting state from network B. + +![](/architecture-assets/relay_flow.png) + +1. An application sends a request to their networks relay over gRPC +1. The local relay inspects the query within the request and uses the relevant information to forward the request to the correct remote relay +1. The remote relay's driver interprets the query and invokes the smart contract for the query +1. Once network B has returned a response to its relay, the relay forwards the response back to relay A +1. The application gets the response from the relay, this can either be via a push or pull mechanism +1. The application invokes a domain specific smart contract to process the response from network B diff --git a/weaver/docs/docs/external/architecture-and-design/weaver-dapps.md b/weaver/docs/docs/external/architecture-and-design/weaver-dapps.md index 2268810171..f9d432ac29 100644 --- a/weaver/docs/docs/external/architecture-and-design/weaver-dapps.md +++ b/weaver/docs/docs/external/architecture-and-design/weaver-dapps.md @@ -1,7 +1,7 @@ ---- -id: weaver-dapps -title: Weaver Dapps ---- +--- +id: weaver-dapps +title: Weaver Dapps +--- -As mentioned in the [overview](./overview.md), DLTs that integrate with Weaver must contain an interop (IOP) module to facilitate interoperation between ledgers. The interop module contains all the logic responsible for membership, verification policies and access control policies (refer to the RFCs for more information on these). Below shows the architecture of how these interop modules work with the two currently supported DLTs, Fabric and Corda. - -## Fabric - -When Fabric is the requesting network, the IOP module is used to verify the proof and then forward the state onto the application chaincode. 
- -![](/architecture-assets/fabric_dapp_flow1.png) - -When Fabric is the responding network, the IOP module is in charge of verifying the identity of the requester, making sure the requester has access to the state they are requesting, and then finally retrieving the state from the application chaincode to send back to the requesting network. - -![](/architecture-assets/fabric_dapp_flow2.png) - -Verification Policy, Access Control and Membership are modular components within the interop chaincode for seperation of concerns of the code. - -## Corda - -As can be seen from the diagrams below, the architecture for Corda is very similar to that of Fabric. The main difference is that the interop module and the application specific flows are in seperate CorDapps, instead of seperate chaincodes like in Fabric. - -![](/architecture-assets/corda_dapp_flow1.png) - -![](/architecture-assets/corda_dapp_flow2.png) +As mentioned in the [overview](./overview.md), DLTs that integrate with Weaver must contain an interop (IOP) module to facilitate interoperation between ledgers. The interop module contains all the logic responsible for membership, verification policies and access control policies (refer to the RFCs for more information on these). Below shows the architecture of how these interop modules work with the two currently supported DLTs, Fabric and Corda. + +## Fabric + +When Fabric is the requesting network, the IOP module is used to verify the proof and then forward the state onto the application chaincode. + +![](/architecture-assets/fabric_dapp_flow1.png) + +When Fabric is the responding network, the IOP module is in charge of verifying the identity of the requester, making sure the requester has access to the state they are requesting, and then finally retrieving the state from the application chaincode to send back to the requesting network. 
+ +![](/architecture-assets/fabric_dapp_flow2.png) + +Verification Policy, Access Control and Membership are modular components within the interop chaincode for seperation of concerns of the code. + +## Corda + +As can be seen from the diagrams below, the architecture for Corda is very similar to that of Fabric. The main difference is that the interop module and the application specific flows are in seperate CorDapps, instead of seperate chaincodes like in Fabric. + +![](/architecture-assets/corda_dapp_flow1.png) + +![](/architecture-assets/corda_dapp_flow2.png) diff --git a/weaver/docs/docs/external/deployment-considerations/deployment-patterns.md b/weaver/docs/docs/external/deployment-considerations/deployment-patterns.md index 68ebce5f53..bfcd1610b2 100644 --- a/weaver/docs/docs/external/deployment-considerations/deployment-patterns.md +++ b/weaver/docs/docs/external/deployment-considerations/deployment-patterns.md @@ -1,7 +1,7 @@ ---- -id: "deployment-patterns" -title: Deployment Patterns ---- +--- +id: "deployment-patterns" +title: Deployment Patterns +--- -## Modes of Interoperability - -We identify distinct modes or patterns of interoperation based on the nature and purpose of the artifact that two networks (or parties within) have a common interest in and the purpose they wish to achieve. - -First, we will classify artifacts present on shared ledgers broadly into the following two types: -- __Assets__: This is a ledger item that is associated with a single entity (or limited set of entities), representing the real-world ownership of that item by an entity. Bitcoins in the Bitcoin network and Ether on the Ethereum network are well-known examples, but assets repreenting a wide range of tangible goods can reside on blockchian ledgers, like property titles, bank drafts, precious stones, and financial instruments like bonds and securities. 
-- __Data Records__: This is any information held on a ledger that describes the state of the world, context, or properties of an entity or object. It is not "owned" by, or associated with, specific entities, but is rather common knowledge within a blockchain network. - -The salient distinction between assets and data from an interoperability perspective is that the former may be present only in one network at any given instant in order to maintain its integrity whereas the latter can have copies in multiple networks without losing its value. - -Three common modes in which independent networks will seek to interoperate are as follows. (We can also refer to them as three distinct purposes.) - -### Asset Transfer -This refers to the movement of an asset from its source ledger to a consuming ledger. Since assets has _singleton_ ownership and can't be _double spent_, the transfer of an asset should result in its burning or locking in the source ledger and its creation on the target ledger. - -A typical asset transfer use case is illustrated in the figure below, where Party X initially holds Asset in Network A, and through interoperation transfers Asset to Party Y in Network B. The loss of Asset to X in A must occur simultaneously with the gain of Asset for Y in B;. i.e, these transactions must be _atomic_. ("Holding an asset" refers to a record on a network's shared ledger representing the ownership of that asset by a given entity.) - -![alt text](/use-cases/asset-transfer.png) - -### Asset Exchange -This refers to the change of ownership of an asset in a source network and a corresponding change of ownership in another network. No asset leaves the network it resides in. The well-known terminology for asset exchange is 'Atomic Cross-Chain Swap'. - -_For example_: two parties with both Bitcoin and Ethereum accounts may trade Bitcoin forEthereum based on an exchange rate agreed upon off-chain. 
We generalize this to permissioned networks, where it may be harder to provide guarantees of finality, and therby, atomicity. - -The figure below illustrates a typical asset exchange. Initially, Party X holds Asset M in Network A and Party Y holds Asset N in Network B. Through interoperation, an exchange occurs whereby Y holds M in A and X holds N in B. The changes in these two networks occur atomically. (_Note_: in such a use case, both X and Y must be members of both networks.) See [DvP in Financial Markets](user-stories/financial-markets.md) for an example scenario illustrating asset exchanges. - -![alt text](/use-cases/asset-exchange.png) - -Both the asset transfer and exchange patterns can be extrapolated to scenarios involving more than 2 parties, 2 assets, and 2 networks. The only fixed criterion is that the actions on all networks happen atomically. For the same reason, the infrastructure and protocols to support both asset transfers and exchanges overlap significantly. - -### Data Sharing -This refers to the transfer of data from its source ledger to a consuming ledger. (In many scenarios, data records in one network ledger may need to be shared with another network ledger in order to drive forward a process on the latter.) The data transferred can be the result of invoking a contract or a database query. There are no technical limits to the number of times a given piece of data can be copied to other ledgers. - -The below figure illustrates this pattern, where initially, Data Record is maintained only on Network A's ledger, and through interoperation, a copy resides on Network B's ledger. (_Note_: the data record may be transformed within Network B during the sharing process before a transaction is committed to its ledger.) See [Global Trade](user-stories/global-trade.md) for an example scenario illustrating data sharing. 
- -![alt text](/use-cases/data-transfer.png) - -### Identity -This refers to the process by which identity can be expressed and comprehended beyond the boundaries of a single network. The ability to reason about identities as real-world entities along with credentials proving their membership in various networks is key to creating a trust basis that enables the three interoperability modes listed above. From that perspective, this kind of cross-network identity management lies on a higher plane than data and asset movements. For more details on our thinking, see the Interop RFC pages. +## Modes of Interoperability + +We identify distinct modes or patterns of interoperation based on the nature and purpose of the artifact that two networks (or parties within) have a common interest in and the purpose they wish to achieve. + +First, we will classify artifacts present on shared ledgers broadly into the following two types: +- __Assets__: This is a ledger item that is associated with a single entity (or limited set of entities), representing the real-world ownership of that item by an entity. Bitcoins in the Bitcoin network and Ether on the Ethereum network are well-known examples, but assets representing a wide range of tangible goods can reside on blockchain ledgers, like property titles, bank drafts, precious stones, and financial instruments like bonds and securities. +- __Data Records__: This is any information held on a ledger that describes the state of the world, context, or properties of an entity or object. It is not "owned" by, or associated with, specific entities, but is rather common knowledge within a blockchain network. + +The salient distinction between assets and data from an interoperability perspective is that the former may be present only in one network at any given instant in order to maintain its integrity whereas the latter can have copies in multiple networks without losing its value. 
+ +Three common modes in which independent networks will seek to interoperate are as follows. (We can also refer to them as three distinct purposes.) + +### Asset Transfer +This refers to the movement of an asset from its source ledger to a consuming ledger. Since assets have _singleton_ ownership and can't be _double spent_, the transfer of an asset should result in its burning or locking in the source ledger and its creation on the target ledger. + +A typical asset transfer use case is illustrated in the figure below, where Party X initially holds Asset in Network A, and through interoperation transfers Asset to Party Y in Network B. The loss of Asset to X in A must occur simultaneously with the gain of Asset for Y in B; i.e., these transactions must be _atomic_. ("Holding an asset" refers to a record on a network's shared ledger representing the ownership of that asset by a given entity.) + +![alt text](/use-cases/asset-transfer.png) + +### Asset Exchange +This refers to the change of ownership of an asset in a source network and a corresponding change of ownership in another network. No asset leaves the network it resides in. The well-known terminology for asset exchange is 'Atomic Cross-Chain Swap'. + +_For example_: two parties with both Bitcoin and Ethereum accounts may trade Bitcoin for Ethereum based on an exchange rate agreed upon off-chain. We generalize this to permissioned networks, where it may be harder to provide guarantees of finality, and thereby, atomicity. + +The figure below illustrates a typical asset exchange. Initially, Party X holds Asset M in Network A and Party Y holds Asset N in Network B. Through interoperation, an exchange occurs whereby Y holds M in A and X holds N in B. The changes in these two networks occur atomically. (_Note_: in such a use case, both X and Y must be members of both networks.) See [DvP in Financial Markets](user-stories/financial-markets.md) for an example scenario illustrating asset exchanges. 
+ +![alt text](/use-cases/asset-exchange.png) + +Both the asset transfer and exchange patterns can be extrapolated to scenarios involving more than 2 parties, 2 assets, and 2 networks. The only fixed criterion is that the actions on all networks happen atomically. For the same reason, the infrastructure and protocols to support both asset transfers and exchanges overlap significantly. + +### Data Sharing +This refers to the transfer of data from its source ledger to a consuming ledger. (In many scenarios, data records in one network ledger may need to be shared with another network ledger in order to drive forward a process on the latter.) The data transferred can be the result of invoking a contract or a database query. There are no technical limits to the number of times a given piece of data can be copied to other ledgers. + +The below figure illustrates this pattern, where initially, Data Record is maintained only on Network A's ledger, and through interoperation, a copy resides on Network B's ledger. (_Note_: the data record may be transformed within Network B during the sharing process before a transaction is committed to its ledger.) See [Global Trade](user-stories/global-trade.md) for an example scenario illustrating data sharing. + +![alt text](/use-cases/data-transfer.png) + +### Identity +This refers to the process by which identity can be expressed and comprehended beyond the boundaries of a single network. The ability to reason about identities as real-world entities along with credentials proving their membership in various networks is key to creating a trust basis that enables the three interoperability modes listed above. From that perspective, this kind of cross-network identity management lies on a higher plane than data and asset movements. For more details on our thinking, see the Interop RFC pages. 
diff --git a/weaver/docs/docs/external/publications.md b/weaver/docs/docs/external/publications.md index 534783240d..2d6c0e2426 100644 --- a/weaver/docs/docs/external/publications.md +++ b/weaver/docs/docs/external/publications.md @@ -1,7 +1,7 @@ ---- -id: publications -title: Publications ---- +--- +id: publications +title: Publications +--- ## 2021 - -### Verifiable Observation of Permissioned Ledgers - + +### Verifiable Observation of Permissioned Ledgers + *IEEE International Conference on Blockchain and Cryptocurrency*, 2021 - -Ermyas Abebe, Yining Hu, Allison Irvin, Dileban Karunamoorthy, Vinayaka Pandit, Venkatraman Ramakrishna, Jiangshan Yu - -[`[arXiv]`](https://arxiv.org/abs/2012.07339) - -### Decentralized Cross-Network Identity Management for Blockchain Interoperation - + +Ermyas Abebe, Yining Hu, Allison Irvin, Dileban Karunamoorthy, Vinayaka Pandit, Venkatraman Ramakrishna, Jiangshan Yu + +[`[arXiv]`](https://arxiv.org/abs/2012.07339) + +### Decentralized Cross-Network Identity Management for Blockchain Interoperation + *IEEE International Conference on Blockchain and Cryptocurrency*, 2021 - -Bishakh Chandra Ghosh, Sandip Chakraborty, Venkatraman Ramakrishna, Chander Govindarajan,Dushyant Behl, Dileban Karunamoorthy, Ermyas Abebe - -[`[arXiv]`](https://arxiv.org/abs/2104.03277) - - -## 2019 - -### Enabling Enterprise Blockchain Interoperability with Trusted Data Transfer - -*Proceedings of the 20th International Middleware Conference Industrial Track*, 2019 - -Ermyas Abebe, Dushyant Behl, Chander Govindarajan, Yining Hu, Dileban Karunamoorthy, Petr Novotny, Vinayaka Pandit, Venkatraman Ramakrishna, Christian Vecchiola - -[`[Proceedings]`](https://dl.acm.org/doi/abs/10.1145/3366626.3368129) [`[arXiv]`]( https://arxiv.org/abs/1911.01064) - - -### On the Interoperability of Distributed Ledgers, Medium - -Dileban Karunamoorthy, Ermyas Abebe - -[`[Medium]`](https://medium.com/thinkdecentralized/on-the-interoperability-of-distributed-ledgers-15f584b79808) + 
+Bishakh Chandra Ghosh, Sandip Chakraborty, Venkatraman Ramakrishna, Chander Govindarajan, Dushyant Behl, Dileban Karunamoorthy, Ermyas Abebe + +[`[arXiv]`](https://arxiv.org/abs/2104.03277) + + +## 2019 + +### Enabling Enterprise Blockchain Interoperability with Trusted Data Transfer + +*Proceedings of the 20th International Middleware Conference Industrial Track*, 2019 + +Ermyas Abebe, Dushyant Behl, Chander Govindarajan, Yining Hu, Dileban Karunamoorthy, Petr Novotny, Vinayaka Pandit, Venkatraman Ramakrishna, Christian Vecchiola + +[`[Proceedings]`](https://dl.acm.org/doi/abs/10.1145/3366626.3368129) [`[arXiv]`](https://arxiv.org/abs/1911.01064) + + +### On the Interoperability of Distributed Ledgers, Medium + +Dileban Karunamoorthy, Ermyas Abebe + +[`[Medium]`](https://medium.com/thinkdecentralized/on-the-interoperability-of-distributed-ledgers-15f584b79808) diff --git a/weaver/docs/docs/external/roadmap.md b/weaver/docs/docs/external/roadmap.md index 8edeb70642..2e3f6bbe75 100644 --- a/weaver/docs/docs/external/roadmap.md +++ b/weaver/docs/docs/external/roadmap.md @@ -1,7 +1,7 @@ ---- -id: roadmap -title: Roadmap ---- +--- +id: roadmap +title: Roadmap +--- -In traditional financial markets parties trade assets such as securities and derivatives for cash or other assets. To reduce risk, various clearing and settlement processes and intermediaries are often involved. One form of settlement is a DvP (delivery versus payment) where the transfer of securities is performed only in the event of a corresponding payment. This arrangement reduces principal risk by ensuring that both parties receive their end of the exchange. However, settlement in financial markets are slow and time consuming. It also involves counterparty risks and requires intermediaries. - -Over the past few years, we have been seeing significant efforts in digitising and tokenising both currencies and securities on Distributed Ledger Technology (DLT) infrastructures. 
On the one hand we have seen concerted efforts around Central Bank Digital Currencies (CBDC) being added to the landscape of other blockchain based payment networks. On the other hand, we have also seen efforts such as that from the Australian Stock Exchange (ASX) to replace its current settlement system--Clearing House Electronic Subregister System (CHESS) with a DLT based platform by 2021. - -Against this backdrop, a number of central banks have been exploring the potential of performing DvP settlement across a currency ledger and a securities ledger. In this use case, we use this as a motivating use-case for our discussions. The scenario involves two decentralised ledgers, namely, a currency ledger and a securities ledger, based on different DLT protocols performing a coordinated transfer of assets in their respective ledgers. - -The figure below depicts this scenario in the context of two organisations--*Org-A* and *Org-B*. *Org-B* wants to purchase some securities owned by *Org-A* and both organisations have accounts on both ledgers. This scenario is simplified and leaves out a number of additional real world processes. For instance, the buyer and seller for securities need to discover each other and agree on the price and terms of a sale. In addition, an offer to sell securities might be fulfilled by multiple buyers taking smaller portions of the amount for sale. Such capabilities are often offered by centralised exchanges that offer capabilities such as order books and matching engines to address these needs. In this scenario we instead focus on the settlement process that follows such steps, once the parties of an exchange and the price of the exchange for an asset are determined. 
- -To effect the settlement of this exchange between *Org-A* and *Org-B*, the following two transactions will have to happen atomically across both networks: i) transfer of payment from *Org-B*'s currency account in the CBDC ledger to *Org-A* while at the same time ii) the entitlements of the designated securities are transferred from *Org-A* to *Org-B*. The scenario would need to guarantee that after the transaction execution, either both parties have their end of the exchange or neither does and that this exchange is performed in a timely manner. - -![Simple DvP scenario in financial markets](../../../static/use-cases/financial-markets-1.png) - -The settlement of the exchange of securities from *Org-A* to *Org-B* in the Financial Securities Network for a simultaneous payment from *Org-B* to *Org-A* in the CBDC network is coordinated by Weaver using [Hashed Time Lock Contracts](https://en.bitcoin.it/wiki/Hash_Time_Locked_Contracts). -This protocol essentially has three phases: -- Fund locking: To initialise an asset exchange, it is common for one or both parties to first lock up funds with a fund-withholding party on his or her own blockchain. Temporary fund locking ensures the locked fund cannot be used for other purposes while the exchange is being executed. This scheme is often used with a specified timeout to provide flexibility for reclaiming locked funds if the exchange does not take place. -- Fund redeeming: In general, the execution requires a pair of transactions to occur on both blockchains, e.g., from Org-A to Org-B on the FSN ledger and from Org-B to Org-A in CBDC ledger. When certain conditions are met, the locked funds can be redeemed by, or paid to the respective users. The execution of the exchange can be carried out by users themselves, or through other trusted third parties. These trusted third parties can be stand-alone parties that are not otherwise involved in both blockchains, or part of either blockchain.  
-- Refund: For protocols that are initialised with a temporary fund-locking, the locked funds can usually be reclaimed by the initial owner after a specified timeout, if a redemption has not occurred.  - -The process proceeds as follows, and is further illustrated in the figure below: -1. **Org-A locks its securities in FSN ledger**: *Org-A* first creates some secret S, known only to it and locks its securities using the hash of S. The securities are configured to redeemable by *Org-B* if it presents S within some specified time threshold. -1. **Org-B locks payments tokens in CBDC ledger**: Org-B, observes that *Org-A* has locked its securities in the FSN network and does a corresponding lock of its payment tokens with the hash of S, used by *Org-A* in locking its securities. The payment tokens are redeemable only by Org-A, if it submits a transaction that reveals S within a specified time. -1. **Org-A checks Org-B's contract in CBDC ledger**: *Org-A* checks the CBDC network to ensure that the payments tokens are locked by Org-B. -1. **Org-A claims payments in CBDC ledger**: *Org-A* submits a transaction to claim the payments tokens, by revealing the secret S. -1. **Org-B claims securities in FSN ledger**: *Org-B* observes that the value of S has been revealed in the CBDC network by *Org-A* in step 4, and submits a transaction to claim the securities in the FSN network using the revealed secret. - -![Simple DvP scenario in financial markets](../../../static/use-cases/financial-markets-2.png) +In traditional financial markets parties trade assets such as securities and derivatives for cash or other assets. To reduce risk, various clearing and settlement processes and intermediaries are often involved. One form of settlement is a DvP (delivery versus payment) where the transfer of securities is performed only in the event of a corresponding payment. This arrangement reduces principal risk by ensuring that both parties receive their end of the exchange. 
However, settlement in financial markets is slow and time consuming. It also involves counterparty risks and requires intermediaries. + +Over the past few years, we have been seeing significant efforts in digitising and tokenising both currencies and securities on Distributed Ledger Technology (DLT) infrastructures. On the one hand we have seen concerted efforts around Central Bank Digital Currencies (CBDC) being added to the landscape of other blockchain based payment networks. On the other hand, we have also seen efforts such as that from the Australian Stock Exchange (ASX) to replace its current settlement system--Clearing House Electronic Subregister System (CHESS) with a DLT based platform by 2021. + +Against this backdrop, a number of central banks have been exploring the potential of performing DvP settlement across a currency ledger and a securities ledger. We use this as a motivating use case for our discussions. The scenario involves two decentralised ledgers, namely, a currency ledger and a securities ledger, based on different DLT protocols performing a coordinated transfer of assets in their respective ledgers. + +The figure below depicts this scenario in the context of two organisations--*Org-A* and *Org-B*. *Org-B* wants to purchase some securities owned by *Org-A* and both organisations have accounts on both ledgers. This scenario is simplified and leaves out a number of additional real world processes. For instance, the buyer and seller for securities need to discover each other and agree on the price and terms of a sale. In addition, an offer to sell securities might be fulfilled by multiple buyers taking smaller portions of the amount for sale. Such capabilities are often offered by centralised exchanges that offer capabilities such as order books and matching engines to address these needs. 
In this scenario we instead focus on the settlement process that follows such steps, once the parties of an exchange and the price of the exchange for an asset are determined. + +To effect the settlement of this exchange between *Org-A* and *Org-B*, the following two transactions will have to happen atomically across both networks: i) transfer of payment from *Org-B*'s currency account in the CBDC ledger to *Org-A* while at the same time ii) the entitlements of the designated securities are transferred from *Org-A* to *Org-B*. The scenario would need to guarantee that after the transaction execution, either both parties have their end of the exchange or neither does and that this exchange is performed in a timely manner. + +![Simple DvP scenario in financial markets](../../../static/use-cases/financial-markets-1.png) + +The settlement of the exchange of securities from *Org-A* to *Org-B* in the Financial Securities Network for a simultaneous payment from *Org-B* to *Org-A* in the CBDC network is coordinated by Weaver using [Hashed Time Lock Contracts](https://en.bitcoin.it/wiki/Hash_Time_Locked_Contracts). +This protocol essentially has three phases: +- Fund locking: To initialise an asset exchange, it is common for one or both parties to first lock up funds with a fund-withholding party on his or her own blockchain. Temporary fund locking ensures the locked fund cannot be used for other purposes while the exchange is being executed. This scheme is often used with a specified timeout to provide flexibility for reclaiming locked funds if the exchange does not take place. +- Fund redeeming: In general, the execution requires a pair of transactions to occur on both blockchains, e.g., from Org-A to Org-B on the FSN ledger and from Org-B to Org-A in CBDC ledger. When certain conditions are met, the locked funds can be redeemed by, or paid to the respective users. 
The execution of the exchange can be carried out by users themselves, or through other trusted third parties. These trusted third parties can be stand-alone parties that are not otherwise involved in both blockchains, or part of either blockchain.  +- Refund: For protocols that are initialised with a temporary fund-locking, the locked funds can usually be reclaimed by the initial owner after a specified timeout, if a redemption has not occurred.  + +The process proceeds as follows, and is further illustrated in the figure below: +1. **Org-A locks its securities in FSN ledger**: *Org-A* first creates some secret S, known only to it and locks its securities using the hash of S. The securities are configured to be redeemable by *Org-B* if it presents S within some specified time threshold. +1. **Org-B locks payments tokens in CBDC ledger**: Org-B observes that *Org-A* has locked its securities in the FSN network and does a corresponding lock of its payment tokens with the hash of S, used by *Org-A* in locking its securities. The payment tokens are redeemable only by Org-A, if it submits a transaction that reveals S within a specified time. +1. **Org-A checks Org-B's contract in CBDC ledger**: *Org-A* checks the CBDC network to ensure that the payments tokens are locked by Org-B. +1. **Org-A claims payments in CBDC ledger**: *Org-A* submits a transaction to claim the payments tokens, by revealing the secret S. +1. **Org-B claims securities in FSN ledger**: *Org-B* observes that the value of S has been revealed in the CBDC network by *Org-A* in step 4, and submits a transaction to claim the securities in the FSN network using the revealed secret. 
+ +![Simple DvP scenario in financial markets](../../../static/use-cases/financial-markets-2.png) diff --git a/weaver/docs/docs/external/user-stories/global-trade.md b/weaver/docs/docs/external/user-stories/global-trade.md index 2e2cb9d4da..5709a745c1 100644 --- a/weaver/docs/docs/external/user-stories/global-trade.md +++ b/weaver/docs/docs/external/user-stories/global-trade.md @@ -1,7 +1,7 @@ ---- -id: global-trade -title: Global Trade ---- +--- +id: global-trade +title: Global Trade +--- -The examples in this page cover the __global trade__ application domain and the __data sharing__ pattern. - -## Process Overview -At its simplest, international trade is about a party in one country buying certain goods from a party in another country. Because the goods cross international boundaries, the buyer is called an _importer_ and the seller is called an _exporter_. For the same reason, this process is not as straightforward as, say, purchasing an item from a retailer. - -The exporting of goods in most countries is governed by a host of regulatory provisions and authorities, making the very act of clearing the sale and getting the goods ready for shipment a complex one. Further, an exporter must rely on one or more _carriers_ to move the shipment from source to destination while managing all of the risks this entails. - -But this only covers the shipping logistics. The trading parties, i.e., the exporter and importer, both face what is called _counterparty risk_, or the hazard of giving something up without a guarantee of receiving something in return. If the exporter ships the goods first, the importer may renege on the payment. And if the importer mmakes the payment first, the exporter may renege on the shipment. To hedge against this risk, sophisticated process of _trade finance_ have evolved over centuries, with banks or other financial institutions providing the sorts of guarantees (in exchange for fees) that enable exporters and importers to safely conduct trades. 
- -Permissioned blockchains are a great fit to manage such trade scenarios, involving multiple independent entities and no governing authorities, using smart contracts. Let us now see two kinds of processes in action, each of which can be managed in its own restricted network: -1. __Trade logistics__: preparation, clearance, and export of goods -2. __Trade finance__: payment guarantees and fulfillment - -## Networks in Isolation -There exist real business networks in production that manage trade logistics and finance, but they can be very complex. We will present highly simplified versions of these processes, and focus on the aspects that will motivate the need for data sharing across networks. - -Also, we will henceforth use the terms _buyer_ and _seller_ instead of _importer_ and _exporter_ respectively. - -### Initiating a Trade -Our trade process begins offline, with buyer and seller negotiating and agreeing on the sale of particular goods for given payment. We will assume that a _purchase order_ is created and contains a unique id we can use as reference in subsequent steps. This is illustrated in the figure below. - -![alt text](../../../static/use-cases/purchase-order.png) - -### Trade Logistics Network -The figure below represents a trade logistics network consisting of a seller and a carrier, loosely inspired by the TradeLens network built on Hyperledger Fabric. Think of the seller as a coffee plantation owner is Brazil, for example, and the carrier as a prominent shipping company like Maersk. - -![alt text](../../../static/use-cases/trade-logistics-network.png) - -The seller begins by booking a shipping consignment (associated with the purchase order id) and then registering its creation. It then hands the consignment over to the carrier. In a real life export scenario, this process involves a lot of documentation and approval cycles, but we are going to ignore all of those here. 
The carrier supplies documents certifying its possession of the consignment and the contents within it. The _bill of lading_ (B/L for short) is one of these documents, and though there may be others, like a packing list and a shipping manifest, we only need one to motivate interoperability. So we will keep it simple and assume that the carrier simply uploads a B/L. The seller examines and accepts this document, following which the carrier dispatches the consignment. - -__Note that, at this point, a valid B/L is recorded on the trade logistics network ledger, a fact we will make use of soon enough.__ - -### Trade Finance Network -The figure below represents a trade finance network consisting of a seller, a buyer, and their respective banks. This is loosely inspired by the We.Trade network built on Hyperledger Fabric and the Marco Polo network built on R3 Corda. Think of the seller as our coffee plantation owner in the logistics network, the buyer as Starbucks, and the banks as Bank of America and HSBC Bank, for example. - -![alt text](../../../static/use-cases/trade-finance-network.png) - -Traders and banks use a variety of mechanisms to mitigate counterparty risk, one of them being _open accounting_, used in networks like We.Trade. We pick the popular _letter of credit_ (L/C for short) instrument for our trade finance story as this exemplifies the inherent link between logistics and finance (we will see this later). The process begins with the buyer requesting an L/C from its bank for a given trade, referring to the id of the purchase order generated earlier. In simplest terms, an L/C is a promise made by a bank to pay a certain amount to the bearer of certain documents associated with a given export shipment. In our scenario, the buyer's bank issues an L/C promising to pay the seller (through its bank) the amount due to it upon production of a valid B/L. This L/C proposal is recorded on the ledger, and subsequently approved by the seller's bank. 
After the seller uploads a B/L, the seller's bank is allowed to register a request for payment. This leaves a payment obligation for the buyer's bank on the ledger, which is where we will conclude the scenario, as the actual payment is carried out through a separate process on a different network. - -__Note that the seller is supposed to produce and record a valid B/L in Step 4.__ - -## Linking Finance with Logistics -It is obvious that the logistics and finance processes are linked. Both begin with references to a common purchase order id and both involve bills of lading. Let us focus on the B/L, as it exemplifies a common pattern in these kinds of business networks: _that of being generated in one network and being used in another_. Because thee are two separate networks, the trade finance network depends on the seller to upload a B/L. But here, we encounter another kind of hazard, one we discussed earlier in the [challenges](./overview#challenges-to-overcome) section. The seller has an incentive to produce a fake bill of lading in order to get paid for goods it may not have dispatched and may have no intention of dispatching. In the present setup, the trade finance network as a whole, nor the buyer or its bank, has visibility into the trade logistics network's ledger, and hence have to trust the seller's word. - -This hazard can be avoided if the networks are interoperable, and can share data with each other. Specifically, if the trade logistics network can share a B/L recorded on its ledger _institutionally_ with the trade finance network. To see how this works, see the diagram below, which contains both the networks and merges their flows. - -![alt text](../../../static/use-cases/interop-bl.png) - -Step 4 in the [isolated trade finance network](./global-trade#trade-finance-network) is now replaced with an interoperation step (Step 10) whereby the trade finance network obtains a B/L from the trade logistics network via a data-sharing protocol. 
This avoids the hazard of having to depend on an unreliable seller to supply a valid B/L. But it is not enough for the trade logistics network to share B/L data. It must also share some _proof_ or evidence that the B/L is presently on record in its shared ledger. - -__Note: in general, an interoperation mechanism for data sharing must communicate data as well as an associated proof that can be _independently verified_ by every memebr of the receiving network.__ - -## Extending the Scenario -The above example conforms to how the logistics and finance processes work today. Letters of credit typically specify bills of lading among the lists of documents that must be supplied to claim a payment. But state-of-the-art blockchain technology and permissioned networks can facilitate a lot more process innovation than earlier technology could. - -The present trade logistics network allows a consignment to be created and dispatched without any knowledge of how the trade will be financed. But in real life, there is a need to track imports and exports carefully to ensure that no regulations are broken, and secondarily, to avoid wasted effort. Therefore, we can envision trade logistics networks requiring some evidence of the financial arrangements of a trade before it allows a seller and a carrier to carry out with the shipping process. - -The process augmentation is illustrated in the figure below with the insertion of a new Step 6 between the booking and the creation of a shipping consignment. - -![alt text](../../../static/use-cases/interop-lc-bl.png) - -Like Step 11 (Step 10 in the earlier figure), this is a data-sharing interoperation step where the L/C proposed and accepted on the trade finance network's ledger is copied to the trade logistics network's ledger. (As with the B/L sharing, proof of the L/C ledger record must accompany L/C data.) 
In this new process, the trade logistics network will not waste time processing shipments that do not have a backing L/C guarantee from the trade finance network. - -__Note that in the interoperation steps, the artifact being shared by one network with another (B/L or L/C) does not have to be copied verbatim to the receiving network's ledger. The process incorporates transformations carried out through smart contract transactions occurring through their networks' native consensus mechanisms.__ - -## Vision: Network of Networks -The promise of blockchain was a more decentralized yet trustworthy internet, but as we saw earlier, networks like Bitcoin and Ethereum may not fulfill that promise, largely because they have technical limitations when it comes to performance and scaling, privacy preservation, and auditability. At the same time, private blockchain networks are here to stay, and they do overcome these technical limitations, albeit at smaller scale. In the longer term, a more sustainable and achievable vision will be to allow private networks to exist while possessing the means to interoperate with other private networks. The interlinking of a trade logistics network with a trade finance network was just a sample. There is more aspects to an international trade scenario: more networks and more cross-network dependencies. But as long as we can institute mechanisms to link two networks directly for data-sharing, we can extrapolate our two-network scenario into a network-of-networks scenario. - -To show how this will work, we will add two more networks to the mix. Business networks exist to track the quality and health of perishable goods from the production source to the end retailer. These networks complement networks like Trade Lens, which manage the long-distance shipping that occurs in the middle but have no visibiity into the goods before consignment creation or after delivery at the destination by the carrier. 
To track goods at either ends is the function of networks like IBM Food Trust, which would be ideal for the coffee shipment example we used earlier. A separate aspect of our trade scenario is the actual payment a buyer makes to the seller. Our trade finance network ends by recording a payment obligation, but the transfer of money needs to occur on a separate payment network, like, for example, the Stellar Network. - -The figure below presents our vision for how cross-network data sharing can help smoothen and simplify all aspects of global trade despite the fact that different sub-processes occur on independent private networks. - -![alt text](../../../static/use-cases/interop-four-networks-trade.png) - -The _Food Tracking Network_ is loosely inspired by IBM Food Trust and the _Payments Network_ loosely inspired by Stellar. -- The seller and buyer, as the trading parties, belong to the food tracking network. The process in this network begins with a registration of a purchase order, following which perishable goods (think coffee seeds, for example) are tracked from farms to a warehouse where a shipping consignment is created. Whenever the carier delivers the shipment, the fact of delivery is recorded as is the condition of the goods within. -- The payment network has the buyer's and seller's bank as members. Action in this network is triggered by the buyer's bank making a payment, or a monetary transfer, to the seller's bank. Both banks have accounts in this network and the payment succeeds or fails depending on available account balance. - -There are two parallel timelines starting at Step 17: -- One involves the trade finance network and the payments network (Steps 17-20). Step 18 contains both "Make Payment" and "Receive Payment" as these actions are supposed to occur atomically within the payments network. These pertain to the fulfilment of the payment promised to the seller by the buyer. 
-- Another involves the trade logistics network and the food tracking network (Steps 17-19). These pertain to the tracking of goods after dispatch and confirmation of their subsequent delivery and condition. - -You may notice we have augmented the trade logistics and trade finance processes as follows: -- Step 17 in the trade logistics network illustrates a sequence of transactions, each recording the location and condition of the goods in transit at periodic intervals. We assume that this information can be procured using sensors deployed with the consignment. -- Step 20 in the trade finance network results in the cancelling of the payment obligation recorded by the seller's bank in Step 17 within that network ("Request Payment"), thereby concluding the trade instance associated with the purchase order id generated in Step 1. - -The data-sharing interoperation steps are as follows: - -- _Step 3_: The trade finance network fetches a purchase order from the food tracking network before permitting an L/C request to be made. -- _Step 8_: The trade logistics network fetches an L/C from the trade finance network before permitting a consignment to be created. -- _Step 9_: The food tracking network fetches a consignment booking record and an associated L/C from the trade logistics network before permitting tracking of goods from the source to the shipping warehouse. -- _Step 11_: The trade logistics network fetches tracking information indicating delivery of goods to the warehouse before permitting a consignment to be created. -- _Step 16_: The trade finance network fetches a B/L from the trade logistics network before permitting the seller's bank to register a payment request. 
-- _Step 18_: This is a recurring step, in each instance of which the food tracking network fetches location and condition information for a given consignment from the trade logistics network, and does not permit the confirmation of consignment delivery and the integrity of the goods within until the shipment reaches its destination and its condition meets the required standard. +The examples in this page cover the __global trade__ application domain and the __data sharing__ pattern. + +## Process Overview +At its simplest, international trade is about a party in one country buying certain goods from a party in another country. Because the goods cross international boundaries, the buyer is called an _importer_ and the seller is called an _exporter_. For the same reason, this process is not as straightforward as, say, purchasing an item from a retailer. + +The exporting of goods in most countries is governed by a host of regulatory provisions and authorities, making the very act of clearing the sale and getting the goods ready for shipment a complex one. Further, an exporter must rely on one or more _carriers_ to move the shipment from source to destination while managing all of the risks this entails. + +But this only covers the shipping logistics. The trading parties, i.e., the exporter and importer, both face what is called _counterparty risk_, or the hazard of giving something up without a guarantee of receiving something in return. If the exporter ships the goods first, the importer may renege on the payment. And if the importer makes the payment first, the exporter may renege on the shipment. To hedge against this risk, sophisticated processes of _trade finance_ have evolved over centuries, with banks or other financial institutions providing the sorts of guarantees (in exchange for fees) that enable exporters and importers to safely conduct trades. 
+ +Permissioned blockchains are a great fit to manage such trade scenarios, involving multiple independent entities and no governing authorities, using smart contracts. Let us now see two kinds of processes in action, each of which can be managed in its own restricted network: +1. __Trade logistics__: preparation, clearance, and export of goods +2. __Trade finance__: payment guarantees and fulfillment + +## Networks in Isolation +There exist real business networks in production that manage trade logistics and finance, but they can be very complex. We will present highly simplified versions of these processes, and focus on the aspects that will motivate the need for data sharing across networks. + +Also, we will henceforth use the terms _buyer_ and _seller_ instead of _importer_ and _exporter_ respectively. + +### Initiating a Trade +Our trade process begins offline, with buyer and seller negotiating and agreeing on the sale of particular goods for given payment. We will assume that a _purchase order_ is created and contains a unique id we can use as reference in subsequent steps. This is illustrated in the figure below. + +![alt text](../../../static/use-cases/purchase-order.png) + +### Trade Logistics Network +The figure below represents a trade logistics network consisting of a seller and a carrier, loosely inspired by the TradeLens network built on Hyperledger Fabric. Think of the seller as a coffee plantation owner in Brazil, for example, and the carrier as a prominent shipping company like Maersk. + +![alt text](../../../static/use-cases/trade-logistics-network.png) + +The seller begins by booking a shipping consignment (associated with the purchase order id) and then registering its creation. It then hands the consignment over to the carrier. In a real life export scenario, this process involves a lot of documentation and approval cycles, but we are going to ignore all of those here. 
The carrier supplies documents certifying its possession of the consignment and the contents within it. The _bill of lading_ (B/L for short) is one of these documents, and though there may be others, like a packing list and a shipping manifest, we only need one to motivate interoperability. So we will keep it simple and assume that the carrier simply uploads a B/L. The seller examines and accepts this document, following which the carrier dispatches the consignment. + +__Note that, at this point, a valid B/L is recorded on the trade logistics network ledger, a fact we will make use of soon enough.__ + +### Trade Finance Network +The figure below represents a trade finance network consisting of a seller, a buyer, and their respective banks. This is loosely inspired by the We.Trade network built on Hyperledger Fabric and the Marco Polo network built on R3 Corda. Think of the seller as our coffee plantation owner in the logistics network, the buyer as Starbucks, and the banks as Bank of America and HSBC Bank, for example. + +![alt text](../../../static/use-cases/trade-finance-network.png) + +Traders and banks use a variety of mechanisms to mitigate counterparty risk, one of them being _open accounting_, used in networks like We.Trade. We pick the popular _letter of credit_ (L/C for short) instrument for our trade finance story as this exemplifies the inherent link between logistics and finance (we will see this later). The process begins with the buyer requesting an L/C from its bank for a given trade, referring to the id of the purchase order generated earlier. In simplest terms, an L/C is a promise made by a bank to pay a certain amount to the bearer of certain documents associated with a given export shipment. In our scenario, the buyer's bank issues an L/C promising to pay the seller (through its bank) the amount due to it upon production of a valid B/L. This L/C proposal is recorded on the ledger, and subsequently approved by the seller's bank. 
After the seller uploads a B/L, the seller's bank is allowed to register a request for payment. This leaves a payment obligation for the buyer's bank on the ledger, which is where we will conclude the scenario, as the actual payment is carried out through a separate process on a different network. + +__Note that the seller is supposed to produce and record a valid B/L in Step 4.__ + +## Linking Finance with Logistics +It is obvious that the logistics and finance processes are linked. Both begin with references to a common purchase order id and both involve bills of lading. Let us focus on the B/L, as it exemplifies a common pattern in these kinds of business networks: _that of being generated in one network and being used in another_. Because there are two separate networks, the trade finance network depends on the seller to upload a B/L. But here, we encounter another kind of hazard, one we discussed earlier in the [challenges](./overview#challenges-to-overcome) section. The seller has an incentive to produce a fake bill of lading in order to get paid for goods it may not have dispatched and may have no intention of dispatching. In the present setup, neither the trade finance network as a whole, nor the buyer or its bank, has visibility into the trade logistics network's ledger, and hence they have to trust the seller's word. + +This hazard can be avoided if the networks are interoperable, and can share data with each other. Specifically, if the trade logistics network can share a B/L recorded on its ledger _institutionally_ with the trade finance network. To see how this works, see the diagram below, which contains both the networks and merges their flows. + +![alt text](../../../static/use-cases/interop-bl.png) + +Step 4 in the [isolated trade finance network](./global-trade#trade-finance-network) is now replaced with an interoperation step (Step 10) whereby the trade finance network obtains a B/L from the trade logistics network via a data-sharing protocol. 
This avoids the hazard of having to depend on an unreliable seller to supply a valid B/L. But it is not enough for the trade logistics network to share B/L data. It must also share some _proof_ or evidence that the B/L is presently on record in its shared ledger. + +__Note: in general, an interoperation mechanism for data sharing must communicate data as well as an associated proof that can be _independently verified_ by every member of the receiving network.__ + +## Extending the Scenario +The above example conforms to how the logistics and finance processes work today. Letters of credit typically specify bills of lading among the lists of documents that must be supplied to claim a payment. But state-of-the-art blockchain technology and permissioned networks can facilitate a lot more process innovation than earlier technology could. + +The present trade logistics network allows a consignment to be created and dispatched without any knowledge of how the trade will be financed. But in real life, there is a need to track imports and exports carefully to ensure that no regulations are broken, and secondarily, to avoid wasted effort. Therefore, we can envision trade logistics networks requiring some evidence of the financial arrangements of a trade before they allow a seller and a carrier to proceed with the shipping process. + +The process augmentation is illustrated in the figure below with the insertion of a new Step 6 between the booking and the creation of a shipping consignment. + +![alt text](../../../static/use-cases/interop-lc-bl.png) + +Like Step 11 (Step 10 in the earlier figure), this is a data-sharing interoperation step where the L/C proposed and accepted on the trade finance network's ledger is copied to the trade logistics network's ledger. (As with the B/L sharing, proof of the L/C ledger record must accompany L/C data.) 
In this new process, the trade logistics network will not waste time processing shipments that do not have a backing L/C guarantee from the trade finance network. + +__Note that in the interoperation steps, the artifact being shared by one network with another (B/L or L/C) does not have to be copied verbatim to the receiving network's ledger. The process incorporates transformations carried out through smart contract transactions occurring through their networks' native consensus mechanisms.__ + +## Vision: Network of Networks +The promise of blockchain was a more decentralized yet trustworthy internet, but as we saw earlier, networks like Bitcoin and Ethereum may not fulfill that promise, largely because they have technical limitations when it comes to performance and scaling, privacy preservation, and auditability. At the same time, private blockchain networks are here to stay, and they do overcome these technical limitations, albeit at smaller scale. In the longer term, a more sustainable and achievable vision will be to allow private networks to exist while possessing the means to interoperate with other private networks. The interlinking of a trade logistics network with a trade finance network was just a sample. There are more aspects to an international trade scenario: more networks and more cross-network dependencies. But as long as we can institute mechanisms to link two networks directly for data-sharing, we can extrapolate our two-network scenario into a network-of-networks scenario. + +To show how this will work, we will add two more networks to the mix. Business networks exist to track the quality and health of perishable goods from the production source to the end retailer. These networks complement networks like TradeLens, which manage the long-distance shipping that occurs in the middle but have no visibility into the goods before consignment creation or after delivery at the destination by the carrier. 
To track goods at either end is the function of networks like IBM Food Trust, which would be ideal for the coffee shipment example we used earlier. A separate aspect of our trade scenario is the actual payment a buyer makes to the seller. Our trade finance network ends by recording a payment obligation, but the transfer of money needs to occur on a separate payment network, like, for example, the Stellar Network. + +The figure below presents our vision for how cross-network data sharing can help smoothen and simplify all aspects of global trade despite the fact that different sub-processes occur on independent private networks. + +![alt text](../../../static/use-cases/interop-four-networks-trade.png) + +The _Food Tracking Network_ is loosely inspired by IBM Food Trust and the _Payments Network_ loosely inspired by Stellar. +- The seller and buyer, as the trading parties, belong to the food tracking network. The process in this network begins with a registration of a purchase order, following which perishable goods (think coffee seeds, for example) are tracked from farms to a warehouse where a shipping consignment is created. Whenever the carrier delivers the shipment, the fact of delivery is recorded as is the condition of the goods within. +- The payment network has the buyer's and seller's bank as members. Action in this network is triggered by the buyer's bank making a payment, or a monetary transfer, to the seller's bank. Both banks have accounts in this network and the payment succeeds or fails depending on available account balance. + +There are two parallel timelines starting at Step 17: +- One involves the trade finance network and the payments network (Steps 17-20). Step 18 contains both "Make Payment" and "Receive Payment" as these actions are supposed to occur atomically within the payments network. These pertain to the fulfilment of the payment promised to the seller by the buyer. 
+- Another involves the trade logistics network and the food tracking network (Steps 17-19). These pertain to the tracking of goods after dispatch and confirmation of their subsequent delivery and condition. + +You may notice we have augmented the trade logistics and trade finance processes as follows: +- Step 17 in the trade logistics network illustrates a sequence of transactions, each recording the location and condition of the goods in transit at periodic intervals. We assume that this information can be procured using sensors deployed with the consignment. +- Step 20 in the trade finance network results in the cancelling of the payment obligation recorded by the seller's bank in Step 17 within that network ("Request Payment"), thereby concluding the trade instance associated with the purchase order id generated in Step 1. + +The data-sharing interoperation steps are as follows: + +- _Step 3_: The trade finance network fetches a purchase order from the food tracking network before permitting an L/C request to be made. +- _Step 8_: The trade logistics network fetches an L/C from the trade finance network before permitting a consignment to be created. +- _Step 9_: The food tracking network fetches a consignment booking record and an associated L/C from the trade logistics network before permitting tracking of goods from the source to the shipping warehouse. +- _Step 11_: The trade logistics network fetches tracking information indicating delivery of goods to the warehouse before permitting a consignment to be created. +- _Step 16_: The trade finance network fetches a B/L from the trade logistics network before permitting the seller's bank to register a payment request. 
+- _Step 18_: This is a recurring step, in each instance of which the food tracking network fetches location and condition information for a given consignment from the trade logistics network, and does not permit the confirmation of consignment delivery and the integrity of the goods within until the shipment reaches its destination and its condition meets the required standard. - _Step 19_: The trade finance network gets confirmation of payment (from buyer's account to seller's account) from the payments network. - -To summarize, internationally traded goods can be tracked from a farm in one country to a retailer in another, the goods can be exported and shipped with all regulations complied with, financial guarantees can be put in place to safeguard the trading parties, and cross-border payments can be processed seamlessly and in a trustworthy manner. But this requires a combination of private blockchain networks willing to share data with each other and also have the ability to verify the authenticity of received data. We hope this scenario makes the motivation for data-sharing interoperation mechanisms perfectly clear. + +To summarize, internationally traded goods can be tracked from a farm in one country to a retailer in another, the goods can be exported and shipped with all regulations complied with, financial guarantees can be put in place to safeguard the trading parties, and cross-border payments can be processed seamlessly and in a trustworthy manner. But this requires a combination of private blockchain networks willing to share data with each other and also have the ability to verify the authenticity of received data. We hope this scenario makes the motivation for data-sharing interoperation mechanisms perfectly clear. 
diff --git a/weaver/docs/docs/external/user-stories/legacy-integration.md b/weaver/docs/docs/external/user-stories/legacy-integration.md index 404c8baf5b..b7431a84d3 100644 --- a/weaver/docs/docs/external/user-stories/legacy-integration.md +++ b/weaver/docs/docs/external/user-stories/legacy-integration.md @@ -1,7 +1,7 @@ ---- -id: legacy-integration -title: Legacy Integration ---- +--- +id: legacy-integration +title: Legacy Integration +--- -A standard for self-contained messages respresenting state in distributed ledgers, along with proofs of validity, enables interoperability with legacy enterprise applications. These messages can be consumed, stored or forwarded by any traditional centralized application. - - -![](/legacy-integration.jpg) - - - +A standard for self-contained messages representing state in distributed ledgers, along with proofs of validity, enables interoperability with legacy enterprise applications. These messages can be consumed, stored or forwarded by any traditional centralized application. + + +![](/legacy-integration.jpg) + + + diff --git a/weaver/docs/docs/external/user-stories/overview.md b/weaver/docs/docs/external/user-stories/overview.md index b5992a91f0..d9f4ef53cb 100644 --- a/weaver/docs/docs/external/user-stories/overview.md +++ b/weaver/docs/docs/external/user-stories/overview.md @@ -1,7 +1,7 @@ ---- -id: overview -title: Overview ---- +--- +id: overview +title: Overview +--- -In the [introduction](../interoperability-modes.md), we listed various modes (or patterns) of interoperation like asset transfers, asset exchanges, and data sharing. In IT parlance, we can think of this as a _horizontal_ classification of use cases for interoperability. In this section of the documentation, we will discuss the _verticals_, or application domains, that exemplify the use and necessity of interoperation mechanisms. 
- -## Application Domains - -Distributed ledger technology has been applied gainfully to several areas where legacy processes were inefficient, cumbersome, and error-prone. With the enablement of interoperation among these networks, they have the potential to take the next step toward a truly decentralized yet trustworthy internet. We call out two prominent focus areas. - -### Global Trade -Trade when seen from a global and international perspective is highly complex. In the absence of central coordinating and law-enforcing authorities at the world level, various ad hoc processes have been created and refined over centuries by merchants, financiers, and regulators, to manage complex supply-chain logistics and cross-border financing that underpin global trade. These processes exist to ensure that parties can hedge their risks, mitigate possibilities for non-compliance, and ship goods from one location to another while complying with regulatory guidelines. - -Multiple networks have emerged to handle trade processes limited in scope. There exist networks to handle trade logistics (like TradeLens, built on Hyperledger Fabric), food tracking (IBM Food Trust, built on Hyperledger Fabric), trade finance (like We.Trade, built on Hyperledger Fabric, and Marco Polo, built on R3 Corda), cross-border payments, and _know-your-customer_, or KYC, processes. An end-to-end trade scenario, involving shipment of goods, financing commitments, documentation, shipping, tracking, and payments, will rely on many or all of these networks. Interoperation will help us overcome this fragmentation and lack of visibility of one network into another, and enable trustworthy and efficient trades at global scale using blockchain technology. See [Global Trade](./global-trade.md) for a concrete example. - -### Financial Markets -Securities trading is a common and lucrative transaction in financial markets. 
As with any form of exchange, when a security is sold in exchange for money, the party that gives up its asset first faces a _non-compliance risk_; i.e., the other party may renege on the deal after it receives an asset. With the advent of blockchain-backed digital currencies maintained by countries' central banks, opportunities now exist to carry out security trades safely and efficiently. But this requires interoperation between networks managing digital currency on behalf of central banks (like private versions of Bitcoin networks with faster commitment times) and networks managing tracking securities and their ownerships. See [DvP in Financial Markets](./financial-markets.md) for a concrete example. - -### Other Scenarios -There are other domains or _verticals_ we can think of that would benefit from interoperation. Healthcare is one, where different networks may exist: citizens' identity records, employer network, healthcare provider network, insurance companies' network, etc. For efficiency of operation (with privacy preservation guarantees) and to ensure that service and payments occur promptly and accurately, these networks may seek to interoperate. Similarly, interoperation between networks that manage users' academic and professional credentials may help employers and job seekers. +In the [introduction](../interoperability-modes.md), we listed various modes (or patterns) of interoperation like asset transfers, asset exchanges, and data sharing. In IT parlance, we can think of this as a _horizontal_ classification of use cases for interoperability. In this section of the documentation, we will discuss the _verticals_, or application domains, that exemplify the use and necessity of interoperation mechanisms. + +## Application Domains + +Distributed ledger technology has been applied gainfully to several areas where legacy processes were inefficient, cumbersome, and error-prone. 
With the enablement of interoperation among these networks, they have the potential to take the next step toward a truly decentralized yet trustworthy internet. We call out two prominent focus areas. + +### Global Trade +Trade when seen from a global and international perspective is highly complex. In the absence of central coordinating and law-enforcing authorities at the world level, various ad hoc processes have been created and refined over centuries by merchants, financiers, and regulators, to manage complex supply-chain logistics and cross-border financing that underpin global trade. These processes exist to ensure that parties can hedge their risks, mitigate possibilities for non-compliance, and ship goods from one location to another while complying with regulatory guidelines. + +Multiple networks have emerged to handle trade processes limited in scope. There exist networks to handle trade logistics (like TradeLens, built on Hyperledger Fabric), food tracking (IBM Food Trust, built on Hyperledger Fabric), trade finance (like We.Trade, built on Hyperledger Fabric, and Marco Polo, built on R3 Corda), cross-border payments, and _know-your-customer_, or KYC, processes. An end-to-end trade scenario, involving shipment of goods, financing commitments, documentation, shipping, tracking, and payments, will rely on many or all of these networks. Interoperation will help us overcome this fragmentation and lack of visibility of one network into another, and enable trustworthy and efficient trades at global scale using blockchain technology. See [Global Trade](./global-trade.md) for a concrete example. + +### Financial Markets +Securities trading is a common and lucrative transaction in financial markets. As with any form of exchange, when a security is sold in exchange for money, the party that gives up its asset first faces a _non-compliance risk_; i.e., the other party may renege on the deal after it receives an asset. 
With the advent of blockchain-backed digital currencies maintained by countries' central banks, opportunities now exist to carry out security trades safely and efficiently. But this requires interoperation between networks managing digital currency on behalf of central banks (like private versions of Bitcoin networks with faster commitment times) and networks managing tracking securities and their ownerships. See [DvP in Financial Markets](./financial-markets.md) for a concrete example. + +### Other Scenarios +There are other domains or _verticals_ we can think of that would benefit from interoperation. Healthcare is one, where different networks may exist: citizens' identity records, employer network, healthcare provider network, insurance companies' network, etc. For efficiency of operation (with privacy preservation guarantees) and to ensure that service and payments occur promptly and accurately, these networks may seek to interoperate. Similarly, interoperation between networks that manage users' academic and professional credentials may help employers and job seekers. diff --git a/weaver/docs/docs/external/what-is-interoperability/integration-patterns.md b/weaver/docs/docs/external/what-is-interoperability/integration-patterns.md index 9d20e13125..0b8d383cb5 100644 --- a/weaver/docs/docs/external/what-is-interoperability/integration-patterns.md +++ b/weaver/docs/docs/external/what-is-interoperability/integration-patterns.md @@ -1,7 +1,7 @@ ---- -id: integration-patterns -title: Integration Patterns ---- +--- +id: integration-patterns +title: Integration Patterns +--- -Integration patterns are well-known reusable solutions for integrating systems together. A number of patterns exist for addressing various types integration problems. The specific pattern applied in practice depends on the nature of the integration problem, the overall objective of the integration task, trade-offs in alternate approaches, and potential risks. 
- - - -## Distributed Ledger Integration Patterns - -Here we present common patterns for integrating distributed ledgers. Not all problems are equal, some approaches to itegrating ledgers are preferred over others depending on the use case, the purpose of the itegration and the risks involved. - -### Consensus-based integration between ledgers - -Consensus-based integration aims to communicate the consensus view of one network to another. The consensus view is a representation of state on the ledger that is collectively agreed by the members of the network. This form of integration provides the highest assurance on the validity of state. The Weaver framework is designed to address consensus-based integration between ledgers built on different distributed ledger protocols. - -![](/integration-pattern-consensus-driven.jpg) - -### Standard API integration between applications - -A standard API integration relies on a single party exposing an endpoint for state exchange. The validity of state relies entirely on the trust placed on the party exposing the endpoint. - -![](/integration-pattern-single-party-api.jpg) - -### Single enterprise participating in multiple neworks - -A single enterprise participating in multiple networks can integrate state and contract logic across these networks using off-chain workflows. Unlike the previous pattern, this pattern relies on the enterprise having valid membership credentials on multiple networks. Significant trust must be placed on the organization coordianting the exchange of state across these networks. - -![](/integration-pattern-single-enterprise-multiple-networks.jpg) - -### Single network deployed on multiple heterogenous infrastructure - -Although not an integration pattern, this pattern demonstrates interoperability at the infrastructure layer. The ability to run nodes on multiple cloud providers, as well as on-prem infrastructure, ensures networks are resilient to failures or censorship by infrastructure providers. 
- -![](/integration-pattern-single-network-multiple-cloud.jpg) +Integration patterns are well-known reusable solutions for integrating systems together. A number of patterns exist for addressing various types of integration problems. The specific pattern applied in practice depends on the nature of the integration problem, the overall objective of the integration task, trade-offs in alternate approaches, and potential risks. + + + +## Distributed Ledger Integration Patterns + +Here we present common patterns for integrating distributed ledgers. Not all problems are equal; some approaches to integrating ledgers are preferred over others depending on the use case, the purpose of the integration and the risks involved. + +### Consensus-based integration between ledgers + +Consensus-based integration aims to communicate the consensus view of one network to another. The consensus view is a representation of state on the ledger that is collectively agreed by the members of the network. This form of integration provides the highest assurance on the validity of state. The Weaver framework is designed to address consensus-based integration between ledgers built on different distributed ledger protocols. + +![](/integration-pattern-consensus-driven.jpg) + +### Standard API integration between applications + +A standard API integration relies on a single party exposing an endpoint for state exchange. The validity of state relies entirely on the trust placed on the party exposing the endpoint. + +![](/integration-pattern-single-party-api.jpg) + +### Single enterprise participating in multiple networks + +A single enterprise participating in multiple networks can integrate state and contract logic across these networks using off-chain workflows. Unlike the previous pattern, this pattern relies on the enterprise having valid membership credentials on multiple networks. Significant trust must be placed on the organization coordinating the exchange of state across these networks. 
+ +![](/integration-pattern-single-enterprise-multiple-networks.jpg) + +### Single network deployed on multiple heterogeneous infrastructure + +Although not an integration pattern, this pattern demonstrates interoperability at the infrastructure layer. The ability to run nodes on multiple cloud providers, as well as on-prem infrastructure, ensures networks are resilient to failures or censorship by infrastructure providers. + +![](/integration-pattern-single-network-multiple-cloud.jpg) diff --git a/weaver/docs/docs/external/what-is-interoperability/levels-of-interoperability.md b/weaver/docs/docs/external/what-is-interoperability/levels-of-interoperability.md index 5ddd1b1966..692097013c 100644 --- a/weaver/docs/docs/external/what-is-interoperability/levels-of-interoperability.md +++ b/weaver/docs/docs/external/what-is-interoperability/levels-of-interoperability.md @@ -1,7 +1,7 @@ ---- -id: levels-of-interoperability -title: Levels of Interoperability ---- +--- +id: levels-of-interoperability +title: Levels of Interoperability +--- -Established models of information systems interoperability stratify interoperability concerns into multiple levels. This includes technical, syntactic, semantic and application levels as shown below. - -Above the protocol and application levels there are two additional levels that require careful attention when enabling interoperability. These cover governance and policy decisions when communicating state as well as the legal and regulatory implications of networks under different jurisdictions. - -![](/levels-of-interoperability.jpg) - - -* **Technical**: The technical level is a low-level concern that focuses on the underlying wire protocol used for communication. Examples of protocols at this level include gRPC, Apache Thrift, ASN.1 and CBOR. Protocols at this level are point-to-point and addresses additional concerns such as version negotiation and message delivery guarantees. 
- -* **Syntactic**: The syntactic level is concerned with the structure and format of the messages exchanged. This includes protocol elements such as keywords and types. Examples include protocols defined using Google's Protocol Buffers, JSON-RPC and ASN.1. - -* **Semantic**: The semantic level provides meaning to the messages exchanged. In the context of cross-chain communication, this includes messages that represent a data transfer or an asset exchange as well as other information such as validity proofs and actors involved. - -* **Application**: The application level addresses domain or use-case specific concerns. In this level, interoperability deals with industry standard data models (e.g. supply chain standards such as GS1) and business processes. This level is orthogonal to the technology concerns of interoperability. - -* **Governance and Policies**: The governing members of a ledger play a critical role in extending business processes to external systems. Interoperability necessitates that the governing bodies of the respective systems agree on the nature of their collaboration. The policies enforce these decisions and covers aspects such as access control and conditions for determining the validity of state proofs. - -* **Legal and Regulation**: Networks residing in different jurisdictions must be comply with existing laws and regulations when communicating state. - - - +Established models of information systems interoperability stratify interoperability concerns into multiple levels. This includes technical, syntactic, semantic and application levels as shown below. + +Above the protocol and application levels there are two additional levels that require careful attention when enabling interoperability. These cover governance and policy decisions when communicating state as well as the legal and regulatory implications of networks under different jurisdictions. 
+ +![](/levels-of-interoperability.jpg) + + +* **Technical**: The technical level is a low-level concern that focuses on the underlying wire protocol used for communication. Examples of protocols at this level include gRPC, Apache Thrift, ASN.1 and CBOR. Protocols at this level are point-to-point and address additional concerns such as version negotiation and message delivery guarantees. + +* **Syntactic**: The syntactic level is concerned with the structure and format of the messages exchanged. This includes protocol elements such as keywords and types. Examples include protocols defined using Google's Protocol Buffers, JSON-RPC and ASN.1. + +* **Semantic**: The semantic level provides meaning to the messages exchanged. In the context of cross-chain communication, this includes messages that represent a data transfer or an asset exchange as well as other information such as validity proofs and actors involved. + +* **Application**: The application level addresses domain or use-case specific concerns. In this level, interoperability deals with industry standard data models (e.g. supply chain standards such as GS1) and business processes. This level is orthogonal to the technology concerns of interoperability. + +* **Governance and Policies**: The governing members of a ledger play a critical role in extending business processes to external systems. Interoperability necessitates that the governing bodies of the respective systems agree on the nature of their collaboration. The policies enforce these decisions and cover aspects such as access control and conditions for determining the validity of state proofs. + +* **Legal and Regulation**: Networks residing in different jurisdictions must comply with existing laws and regulations when communicating state. 
+ + + diff --git a/weaver/docs/docs/external/what-is-interoperability/understanding-interoperability.md b/weaver/docs/docs/external/what-is-interoperability/understanding-interoperability.md index fbf777623a..f17a8a66a9 100644 --- a/weaver/docs/docs/external/what-is-interoperability/understanding-interoperability.md +++ b/weaver/docs/docs/external/what-is-interoperability/understanding-interoperability.md @@ -1,7 +1,7 @@ ---- -id: understanding-interoperability -title: Understanding Interoperability ---- +--- +id: understanding-interoperability +title: Understanding Interoperability +--- -Permissioned DLTs have been gaining significant traction in industry since their inception. They have enabled enterprises to harness the innovation of public blockchains, while adhering to the privacy, confidentiality and regulatory constraints that businesses operate under. Permissioned DLTs offer enterprises an infrastructure for managing inter-firm asset, data and business workflow, without the need for a central intermediary that introduces additional sources of risk. Businesses are able to transact directly while reducing counter-party risk and mitigating the need for costly and time-consuming dispute resolution processes, often involving legal and judicial systems. Thus far, the application of this technology has enabled digitisation and disintermediation of many entrenched industry processes, resulting in significant improvements in efficiency, transparency, risk and fraud. - -For practical reasons, the adoption of permissioned blockchains has thus far been driven through use-cases. Enterprises have been coalescing into consortia to create specialised networks that address narrowly-scoped use-cases in isolation. -This use-case driven approach to blockchain adoption is creating a proliferation of niche and isolated networks that are quickly becoming data and value silos. -In addition, these use-cases often represent a slice of a complex end-to-end business process. 
To deliver real value, permissioned networks need to seamlessly integrate with each other and with existing systems in order to holistically transform industries. This requirement for interoperation is coming to the fore as networks transition to production and scale towards broader adoption. - -Interoperability in the context of Distributed Ledger Technologies involves enabling the seamless flow of data and value across disparate networks in a manner that preserves their trust and security tenets. This capability can offer a number of benefits such as: - -- Removing data and value silos -- Increasing market sizes, liquidity and overall efficiency -- Improving network effects -- Enabling orchestration of complex business functionality across networks -- Enabling scale and groawth of networks -- Encouraging further adoption of the technology - - -## Unique Technical Challenges -Enabling interoperation between distributed ledgers presents numerous technical challenges compared to traditional systems integration approaches. This primarily stems from the need to preserve the benefits of decentralised trust beyond the boundaries of a single network. Hence, a naive approach to interoperability based on traditional point-to-point API integration is insufficient for preserving the underlying trust decentralised networks provide. There are two unique challenges present in DLT interoperation: - -### Single-party vs Multi-party Trust  -In distributed ledger architectures, the authority over state lies in a collective and the protocol they employ to ensure its integrity. When one network or an entity consumes state from another, it would need to establish the veracity of the state according to the shared consensus view of parties in the network. This requirement is different than traditional integration with centralised systems wherein the trust for the validity of data is placed on the single party providing the data. 
Establishing the veracity of state in a decentralized network is not trivial. In most cases, a consumer of state might not be able to observe the full ledger of the network itself. Hence, a consumer needs to obtain an independently verifiable cryptographic proof on the validity of state according to the consensus rules and policies of the source network. - -![single-party vs multi-party trust model](/multi-party-trust-model.png) - -### Data vs Asset -Interoperation should not compromise the invariants enforced by individual networks such as protections against double spends on assets. - - -## The Role of Standards - -The term ‘interoperability’ is used rather loosely in many contexts and oftentimes without the same implication. What some call ‘interoperability’, others refer to as ‘integration’, ‘interconnectivity’ or ‘compatibility’. - -The primary goal of interoperability is freedom of choice. Interoperability enables users to choose implementations of systems they find suitable for a given problem without constraints on the system’s ability to communicate with other implementations. - -Implicit in the term interoperability is open standards, which distinguishes it from any form of bespoke integration. Open standards can either be de jure standards ratified by a formal standards organization such as ANSI, IETF, or ISO, or de facto standards proposed and adopted by communities, industries and the market. Open standards enable and encourage implementors to build systems that can work together. - - +Permissioned DLTs have been gaining significant traction in industry since their inception. They have enabled enterprises to harness the innovation of public blockchains, while adhering to the privacy, confidentiality and regulatory constraints that businesses operate under. 
Permissioned DLTs offer enterprises an infrastructure for managing inter-firm asset, data and business workflow, without the need for a central intermediary that introduces additional sources of risk. Businesses are able to transact directly while reducing counter-party risk and mitigating the need for costly and time-consuming dispute resolution processes, often involving legal and judicial systems. Thus far, the application of this technology has enabled digitisation and disintermediation of many entrenched industry processes, resulting in significant improvements in efficiency, transparency, risk and fraud. + +For practical reasons, the adoption of permissioned blockchains has thus far been driven through use-cases. Enterprises have been coalescing into consortia to create specialised networks that address narrowly-scoped use-cases in isolation. +This use-case driven approach to blockchain adoption is creating a proliferation of niche and isolated networks that are quickly becoming data and value silos. +In addition, these use-cases often represent a slice of a complex end-to-end business process. To deliver real value, permissioned networks need to seamlessly integrate with each other and with existing systems in order to holistically transform industries. This requirement for interoperation is coming to the fore as networks transition to production and scale towards broader adoption. + +Interoperability in the context of Distributed Ledger Technologies involves enabling the seamless flow of data and value across disparate networks in a manner that preserves their trust and security tenets. 
This capability can offer a number of benefits such as: + +- Removing data and value silos +- Increasing market sizes, liquidity and overall efficiency +- Improving network effects +- Enabling orchestration of complex business functionality across networks +- Enabling scale and growth of networks +- Encouraging further adoption of the technology + + +## Unique Technical Challenges +Enabling interoperation between distributed ledgers presents numerous technical challenges compared to traditional systems integration approaches. This primarily stems from the need to preserve the benefits of decentralised trust beyond the boundaries of a single network. Hence, a naive approach to interoperability based on traditional point-to-point API integration is insufficient for preserving the underlying trust decentralised networks provide. There are two unique challenges present in DLT interoperation: + +### Single-party vs Multi-party Trust  +In distributed ledger architectures, the authority over state lies in a collective and the protocol they employ to ensure its integrity. When one network or an entity consumes state from another, it would need to establish the veracity of the state according to the shared consensus view of parties in the network. This requirement is different than traditional integration with centralised systems wherein the trust for the validity of data is placed on the single party providing the data. Establishing the veracity of state in a decentralized network is not trivial. In most cases, a consumer of state might not be able to observe the full ledger of the network itself. Hence, a consumer needs to obtain an independently verifiable cryptographic proof on the validity of state according to the consensus rules and policies of the source network. 
+ +![single-party vs multi-party trust model](/multi-party-trust-model.png) + +### Data vs Asset +Interoperation should not compromise the invariants enforced by individual networks such as protections against double spends on assets. + + +## The Role of Standards + +The term ‘interoperability’ is used rather loosely in many contexts and oftentimes without the same implication. What some call ‘interoperability’, others refer to as ‘integration’, ‘interconnectivity’ or ‘compatibility’. + +The primary goal of interoperability is freedom of choice. Interoperability enables users to choose implementations of systems they find suitable for a given problem without constraints on the system’s ability to communicate with other implementations. + +Implicit in the term interoperability is open standards, which distinguishes it from any form of bespoke integration. Open standards can either be de jure standards ratified by a formal standards organization such as ANSI, IETF, or ISO, or de facto standards proposed and adopted by communities, industries and the market. Open standards enable and encourage implementors to build systems that can work together. 
+ + diff --git a/weaver/docs/docs/internal/activity-plan.md b/weaver/docs/docs/internal/activity-plan.md index d45e791a31..e12526b543 100644 --- a/weaver/docs/docs/internal/activity-plan.md +++ b/weaver/docs/docs/internal/activity-plan.md @@ -3,9 +3,9 @@ SPDX-License-Identifier: CC-BY-4.0 --> ---- -id: activity-plan -title: Activity Plan ---- - +--- +id: activity-plan +title: Activity Plan +--- + # Milestones and Activities \ No newline at end of file diff --git a/weaver/docs/docs/internal/documentation-guidelines.md b/weaver/docs/docs/internal/documentation-guidelines.md index df8302c2a3..9831ad6ab7 100644 --- a/weaver/docs/docs/internal/documentation-guidelines.md +++ b/weaver/docs/docs/internal/documentation-guidelines.md @@ -3,9 +3,9 @@ SPDX-License-Identifier: CC-BY-4.0 --> ---- -id: documentation-guidelines -title: Documentation Guidelines ---- - +--- +id: documentation-guidelines +title: Documentation Guidelines +--- + # Documentation Guidelines \ No newline at end of file diff --git a/weaver/docs/docs/internal/team.md b/weaver/docs/docs/internal/team.md index a5952d6a76..061ac3fd28 100644 --- a/weaver/docs/docs/internal/team.md +++ b/weaver/docs/docs/internal/team.md @@ -3,7 +3,7 @@ SPDX-License-Identifier: CC-BY-4.0 --> ---- -id: team -title: Team ---- +--- +id: team +title: Team +--- diff --git a/weaver/rfcs/ABNF-definitions.md b/weaver/rfcs/ABNF-definitions.md index 2f37fd99e7..6c244f55f6 100644 --- a/weaver/rfcs/ABNF-definitions.md +++ b/weaver/rfcs/ABNF-definitions.md @@ -3,8 +3,8 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Common ABNF Definitions - -``` -separator = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | "/" | "[" | "]" | "?" | "=" | "{" | "}" | "#" | "|" -``` +# Common ABNF Definitions + +``` +separator = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | "#" | "|" +``` diff --git a/weaver/rfcs/formats/policies/access-control.md b/weaver/rfcs/formats/policies/access-control.md index 3458c6eb3c..47039346bd 100644 --- a/weaver/rfcs/formats/policies/access-control.md +++ b/weaver/rfcs/formats/policies/access-control.md @@ -3,76 +3,76 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Access Control Policies +# Access Control Policies - RFC: 03-008 - Authors: Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna, Nick Waywood - Status: Proposed - Since: 10-Aug-2020 -## Summary - -- Access control policies are a means to control what objects on a ledger are accessbile to external entities and the type of actions these entities can exercise. +## Summary + +- Access control policies are a means to control what objects on a ledger are accessible to external entities and the type of actions these entities can exercise. - Access control policies are applied against a [security domain](../../models/security/security-domains.md), which can represent entities such as a thing, an individual, an organization or a group of entities such as network. - -## Defining Access Control Policies - -A verification policy has the following structure: - -```protobuf -message AccessControlPolicy { - string securityDomain = 1; - repeated Rule rules = 2; -} - -// Rule represents a single data access rule for the AccessControlPolicy -message Rule { - string principal = 1; - string principalType = 2; - string resource = 3; - bool read = 4; -} -``` - -An access control policy is a set of access _rules_ applied to a security domain, where each rule contains: - -- _principal_ - A security principal an external subject resolves to. When requesting access, the subject must present valid credentials identifying itself with a security domain. -- _principalType_ - The type of identifier used in the principal field (e.g. public-key) -- _resource_ - Represents an artifact on the ledger. 
The type of resources guarded can vary depending on the underlying ledger technology and can include references to business objects, smart contracts, smart contract functions, or other types of code that can result in access to state. The resource can be an exact string match of one of these entities or it can contain a star for fuzzy matching, see below for details -- _read_ - Specifies whether the rule is currently active or not. - -Access policy definitions afford a lot of flexibility in defining rules. Here are a few examples: - -- A policy defined on a security domain identified by "\*" applies to all subjects. This provides any authenticated entity access to objects listed in the rule set. The type of the principal in this case would also be "\*". -- The _resource_ can contain a "\*" in it to support fuzzy matching. To be a valid pattern, it must contain 0 or 1 stars, and if it has one star, it must be at the end of the string. This restriction exists so that it is possible to determine to most specific access control rule for a given address. -- The _principalType_ in a rule can be one of: "\*" | "public-key" | "ca" | "role" | "attribute". This allows for access to all subjects in a security domain ("\*") or, restricts access to subjects with a specific public key, restricts access to subjects whose certificates were issued by a known certificate authority, or subjects with a specific role or attribute defined in their certificate. - -## Examples - -```json -{ - "securityDomain": "", - "rules": [ - { - "principal": "", - "principalType": "public-key", - "resource": "state:BOL10001", - "read": true - } - { - "principal": "intermediate-ca-org3", - "principalType": "ca", - "resource": "*", - "read": true - }, - { - "principal": "intermediate-ca-org3", - "principalType": "ca", - "resource": "state:*", - "read": true - } - ] -} -``` - -**TODO**: Where do we define how to represent DLT specific objects (chaincode, flows etc)? 
+ +## Defining Access Control Policies + +A verification policy has the following structure: + +```protobuf +message AccessControlPolicy { + string securityDomain = 1; + repeated Rule rules = 2; +} + +// Rule represents a single data access rule for the AccessControlPolicy +message Rule { + string principal = 1; + string principalType = 2; + string resource = 3; + bool read = 4; +} +``` + +An access control policy is a set of access _rules_ applied to a security domain, where each rule contains: + +- _principal_ - A security principal an external subject resolves to. When requesting access, the subject must present valid credentials identifying itself with a security domain. +- _principalType_ - The type of identifier used in the principal field (e.g. public-key) +- _resource_ - Represents an artifact on the ledger. The type of resources guarded can vary depending on the underlying ledger technology and can include references to business objects, smart contracts, smart contract functions, or other types of code that can result in access to state. The resource can be an exact string match of one of these entities or it can contain a star for fuzzy matching, see below for details +- _read_ - Specifies whether the rule is currently active or not. + +Access policy definitions afford a lot of flexibility in defining rules. Here are a few examples: + +- A policy defined on a security domain identified by "\*" applies to all subjects. This provides any authenticated entity access to objects listed in the rule set. The type of the principal in this case would also be "\*". +- The _resource_ can contain a "\*" in it to support fuzzy matching. To be a valid pattern, it must contain 0 or 1 stars, and if it has one star, it must be at the end of the string. This restriction exists so that it is possible to determine to most specific access control rule for a given address. +- The _principalType_ in a rule can be one of: "\*" | "public-key" | "ca" | "role" | "attribute". 
This allows for access to all subjects in a security domain ("\*") or, restricts access to subjects with a specific public key, restricts access to subjects whose certificates were issued by a known certificate authority, or subjects with a specific role or attribute defined in their certificate. + +## Examples + +```json +{ + "securityDomain": "", + "rules": [ + { + "principal": "", + "principalType": "public-key", + "resource": "state:BOL10001", + "read": true + } + { + "principal": "intermediate-ca-org3", + "principalType": "ca", + "resource": "*", + "read": true + }, + { + "principal": "intermediate-ca-org3", + "principalType": "ca", + "resource": "state:*", + "read": true + } + ] +} +``` + +**TODO**: Where do we define how to represent DLT specific objects (chaincode, flows etc)? diff --git a/weaver/rfcs/formats/policies/proof-verification.md b/weaver/rfcs/formats/policies/proof-verification.md index 5149d432f9..f8c8a90ab6 100644 --- a/weaver/rfcs/formats/policies/proof-verification.md +++ b/weaver/rfcs/formats/policies/proof-verification.md @@ -3,128 +3,128 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Proof Verification Policies +# Proof Verification Policies - RFC: 03-009 - Authors: Allison Irvin, Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna, Nick Waywood - Status: Proposed - Since: 10-Aug-2020 -## Summary - -- Networks may optionally maintain verification policies that can be applied against state proofs from remote networks. -- These policies describe the minimum criteria a proof must satisfy for it to be considered valid. -- It is possible, and sometimes desirable, for networks to apply a stronger criteria for proof validation than what a remote network might deem sufficient. 
- -## Defining Verification Policies - -The `criteria` for a policy defines the combinatiton of signatures that need to be provided to satisfy validity for that view, and is expressed as boolean logic that is encoded using the [JsonLogic syntax](http://jsonlogic.com/). - -A verification policy has the following structure: - -```protobuf -message VerificationPolicy { - string securityDomain = 1; - repeated Rule rules = 2; -} - -// The Policy captures the list of parties that are required to provide proofs -// of a view in order for the Fabric network to accept the view as valid. -message Policy { - string type = 1; - repeated string criteria = 2; -} - -// List of rules for the VerificationPolicy -message Rule { - // pattern defines the view/views that this rule applies to - // A rule may contain a "*" at the end of the pattern - string pattern = 1; - Policy policy = 2; -} -``` - -A verification policy is a set of access _rules_ applied to a security domain, where each rule contains: - -- _pattern_ - Represents an artifact on the ledger. The type of resources guarded by the pattern can vary depending on the underlying ledger technology and can include references to business objects, smart contracts, smart contract functions, or other types of code that can result in access to state. The resource can be an exact string match of one of these entities or it can contain a star for fuzzy matching, see below for details -- _policy_ - The Policy captures the list of parties that are required to provide proofs of a view in order for the Fabric network to accept the view as valid. - -## Examples - -A sample policy for verifying proofs from a permissioned trade network. 
- -```json -{ - "securityDomain": "trade-network", - "rules": [ - { - "pattern": "trade-channel:trade-chaincode:getbilloflading:10012", - "policy": { - "type": "signature", - "criteria": { - "and": ["org1", "org2"] - } - } - }, - { - "pattern": "trade-channel:trade-chaincode:*", - "policy": { - "type": "signature", - "criteria": { - "and": ["org1", "org2"] - } - } - } - ] -} -``` - -The values of a criteria can be parameterized. Parameters can be view derived e.g. attribute values, or can be named regex captures from the identifier or pattern fields. The example below demonstrates how this can be utilised, where `:issuer:id` and `:beneficiary:id` represent attributes that are extracted from the view. This type of criteria specification is useful for states where the validity of the state solely relies on the parties listed in the states (e.g. Bank Guarantees, Letter of Credit, legal contracts). -The mechaism for extracting the value from these attributes would need to be delegated back to the application contract, since the interoperation layer does not have knowledge of the data-model of a view. The `$issuer_id` parameter represents a captured group from the pattern field. - -```json -{ - "securityDomain": "lygon", - "rules": [ - { - "pattern": "lygon:bg-channel-issuer:bg-chaincode:getBankGuarantee:*", - "policy": { - "type": "signature", - "criteria": { - "or": [ - { - "and": [":issuer:id", ":beneficiary:id"] - }, - { - "and": [":issuer:id", "Lygon1B_org"] - }, - { - "and": ["$issuer", "Lygon1B_org"] - } - ] - } - } - } - ] -} -``` - -A sample policy for verifying proofs from the bitcoin network. - -```json -{ - "securityDomain": "bitcoin", - "rules": [ - { - "identifier": "", - "policy": { - "type": "pow", - "criteria": { - "blocks": 6, - "difficulty": "" - } - } - } - ] -} -``` +## Summary + +- Networks may optionally maintain verification policies that can be applied against state proofs from remote networks. 
+- These policies describe the minimum criteria a proof must satisfy for it to be considered valid. +- It is possible, and sometimes desirable, for networks to apply a stronger criteria for proof validation than what a remote network might deem sufficient. + +## Defining Verification Policies + +The `criteria` for a policy defines the combination of signatures that need to be provided to satisfy validity for that view, and is expressed as boolean logic that is encoded using the [JsonLogic syntax](http://jsonlogic.com/). + +A verification policy has the following structure: + +```protobuf +message VerificationPolicy { + string securityDomain = 1; + repeated Rule rules = 2; +} + +// The Policy captures the list of parties that are required to provide proofs +// of a view in order for the Fabric network to accept the view as valid. +message Policy { + string type = 1; + repeated string criteria = 2; +} + +// List of rules for the VerificationPolicy +message Rule { + // pattern defines the view/views that this rule applies to + // A rule may contain a "*" at the end of the pattern + string pattern = 1; + Policy policy = 2; +} +``` + +A verification policy is a set of access _rules_ applied to a security domain, where each rule contains: + +- _pattern_ - Represents an artifact on the ledger. The type of resources guarded by the pattern can vary depending on the underlying ledger technology and can include references to business objects, smart contracts, smart contract functions, or other types of code that can result in access to state. The resource can be an exact string match of one of these entities or it can contain a star for fuzzy matching, see below for details +- _policy_ - The Policy captures the list of parties that are required to provide proofs of a view in order for the Fabric network to accept the view as valid. + +## Examples + +A sample policy for verifying proofs from a permissioned trade network. 
+ +```json +{ + "securityDomain": "trade-network", + "rules": [ + { + "pattern": "trade-channel:trade-chaincode:getbilloflading:10012", + "policy": { + "type": "signature", + "criteria": { + "and": ["org1", "org2"] + } + } + }, + { + "pattern": "trade-channel:trade-chaincode:*", + "policy": { + "type": "signature", + "criteria": { + "and": ["org1", "org2"] + } + } + } + ] +} +``` + +The values of a criteria can be parameterized. Parameters can be view derived e.g. attribute values, or can be named regex captures from the identifier or pattern fields. The example below demonstrates how this can be utilised, where `:issuer:id` and `:beneficiary:id` represent attributes that are extracted from the view. This type of criteria specification is useful for states where the validity of the state solely relies on the parties listed in the states (e.g. Bank Guarantees, Letter of Credit, legal contracts). +The mechaism for extracting the value from these attributes would need to be delegated back to the application contract, since the interoperation layer does not have knowledge of the data-model of a view. The `$issuer_id` parameter represents a captured group from the pattern field. + +```json +{ + "securityDomain": "lygon", + "rules": [ + { + "pattern": "lygon:bg-channel-issuer:bg-chaincode:getBankGuarantee:*", + "policy": { + "type": "signature", + "criteria": { + "or": [ + { + "and": [":issuer:id", ":beneficiary:id"] + }, + { + "and": [":issuer:id", "Lygon1B_org"] + }, + { + "and": ["$issuer", "Lygon1B_org"] + } + ] + } + } + } + ] +} +``` + +A sample policy for verifying proofs from the bitcoin network. 
+ +```json +{ + "securityDomain": "bitcoin", + "rules": [ + { + "identifier": "", + "policy": { + "type": "pow", + "criteria": { + "blocks": 6, + "difficulty": "" + } + } + } + ] +} +``` diff --git a/weaver/rfcs/formats/views/corda.md b/weaver/rfcs/formats/views/corda.md index 0d9261d264..5fad04f610 100644 --- a/weaver/rfcs/formats/views/corda.md +++ b/weaver/rfcs/formats/views/corda.md @@ -3,228 +3,228 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Corda Views +# Corda Views - RFC: 03-004 - Authors: Allison Irvin, Nick Waywood - Status: Proposed - Since: 13-Aug-2020 -## Addressing a Corda View - -Unlike most distributed ledgers, Corda does not use a broadcast model but -instead shares transactions and state only with parties that need to know. This -peer-to-peer model means that addressing state in a Corda ledger needs to be -more fine-grained than in other DLTs. Instead of addressing a channel (as in -Fabric), the party or parties that are participants of the state need to be -specified. The participants also need a way of deterministically finding the -requested state. This can be done by using Corda's vault query API. - -### Identifying the participant list - -Requirements are: - -- The destination network knows the required endorsers of the state by the - identities included in their certificates. -- All the participants who are listed on the state need to return a state proof. -- The relay driver needs to know the RPC address of the node(s) to forward the request on to. - -There are several potential approaches for doing this. - +## Addressing a Corda View + +Unlike most distributed ledgers, Corda does not use a broadcast model but +instead shares transactions and state only with parties that need to know. This +peer-to-peer model means that addressing state in a Corda ledger needs to be +more fine-grained than in other DLTs. Instead of addressing a channel (as in +Fabric), the party or parties that are participants of the state need to be +specified. 
The participants also need a way of deterministically finding the +requested state. This can be done by using Corda's vault query API. + +### Identifying the participant list + +Requirements are: + +- The destination network knows the required endorsers of the state by the + identities included in their certificates. +- All the participants who are listed on the state need to return a state proof. +- The relay driver needs to know the RPC address of the node(s) to forward the request on to. + +There are several potential approaches for doing this. + 1. a) The requesting network lists the RPC addresses of the nodes in the view - address and the relay driver uses these addresses to directly query the - nodes. Endorsement policy in the request is optional. The relay driver sends - the request out to all nodes individually and collates the responses. This - approach is unlikely to be used in practise because the destination network - should not need to know the RPC address of the Corda nodes. - - b) The requesting network lists all the identities of the nodes in the - address and the relay driver uses a local database or config to lookup the - RPC address of the nodes. Endorsement policy in the request is optional. The - relay driver sends the request out to all nodes individually and collates the - responses. If the list of participants for a view is long, this approach may - not be ideal as the address field may grow quite large. - - c) The requesting network includes an alias for a group of nodes in the - address and the relay driver uses a local database or config to lookup the - RPC address of the nodes corresponding to that alias. Endorsement policy in - the request is optional. This approach may be better suited for scenarios - when the list of participants for a view is long. - -2. The requesting network lists the identities of the nodes in the endorsement - policy and the relay driver uses a local database or config to lookup the RPC - address of the nodes. 
The relay driver sends the request out to all nodes - individually and collates the responses. - -3. The requesting network lists one of the node identities in the address and - the relay driver uses a local database or config to lookup the RPC address of - the node. Endorsement policy is optional. The relay driver sends the request - out to just that node and the node looks up the state and forwards the - request on to all other participants and collates the responses before - sending back to the relay driver. - + address and the relay driver uses these addresses to directly query the + nodes. Endorsement policy in the request is optional. The relay driver sends + the request out to all nodes individually and collates the responses. This + approach is unlikely to be used in practise because the destination network + should not need to know the RPC address of the Corda nodes. + + b) The requesting network lists all the identities of the nodes in the + address and the relay driver uses a local database or config to lookup the + RPC address of the nodes. Endorsement policy in the request is optional. The + relay driver sends the request out to all nodes individually and collates the + responses. If the list of participants for a view is long, this approach may + not be ideal as the address field may grow quite large. + + c) The requesting network includes an alias for a group of nodes in the + address and the relay driver uses a local database or config to lookup the + RPC address of the nodes corresponding to that alias. Endorsement policy in + the request is optional. This approach may be better suited for scenarios + when the list of participants for a view is long. + +2. The requesting network lists the identities of the nodes in the endorsement + policy and the relay driver uses a local database or config to lookup the RPC + address of the nodes. The relay driver sends the request out to all nodes + individually and collates the responses. + +3. 
The requesting network lists one of the node identities in the address and + the relay driver uses a local database or config to lookup the RPC address of + the node. Endorsement policy is optional. The relay driver sends the request + out to just that node and the node looks up the state and forwards the + request on to all other participants and collates the responses before + sending back to the relay driver. + We propose that approach 1a is supported by the interoperation protocol in the -initial implementation. - -### Locating the view. - -Corda provides an API for querying state from the vault - the Vault Query API. -The Vault Query API can be used directly by flows, and therefore CorDapps can -define flows that perform a particular vault query. Corda applications can -provide a flow name and set of required parameters (either as a template or -with known values) as an address for a view. This approach has the benefit of -providing a mechanism for not only addressing a single state as a view, but also -collections of states, or even computing derived state. - -The first point of contact with the Corda network from the relay driver is the -interoperation CorDapp that is installed on all nodes that wish to interoperate -with external networks. The relay driver forwards the request from the external -network to the interoperation CorDapp which first checks that the requesting -party has the required access control permission for the view address based on -flow name. It then attempts to call the corresponding flow that must also be -installed on the node. The node will run the query and return the view to the -initiating flow. Additional access control checks may need to be performed at -this point (for example, if access control was defined on a per-state level). -The initiating flow will then coordinate the signing of the response and -assembly of the view proof to be sent back to the relay driver. 
- -If the approach taken in the addressing of nodes required to endorse the view -was for the requesting network to specify complete list, the interoperation -CorDapp will return the view and proof to the relay driver. The relay driver -will assemble responses from all nodes and return the set of responses back to -the requesting relay. - -If the approach for addressing required endorsing nodes was for just one -participating node to be specified, the interoperation CorDapp must determine -the list of required endorsers from the response returned from the application -CorDapp flow. For example, if a single state was returned, the interoperation -CorDapp will create a flow session for each participant listed in the state and -trigger the interoperation CorDapp flow in these nodes. The interoperation flow -in each node will perform the same steps as listed above (access control checks -for the flow, calling the application CorDapp flow, performing additional access -control checks on the returned view, assembling the view proof) and return the -view and proof response. The initiating interoperation flow will assemble the -responses from each node and return the set back to the relay driver. - -Part of view proof verification by the requesting network requires checking that -all views between endorsing nodes are consistent. This means that the view -returned from each node must be identical and deterministic. When a view is a -single state this is relatively trivial as all nodes should have the same -internal view of the state. However, careful consideration needs to be given when -querying aggregate or derived state. For example, if an external network wishes -to receive proof of total value of a set of assets held by a group of nodes, the -query must be addressed in such a way that all nodes will compute the result on -exactly the same set of assets. 
- -## Corda View Address - -Given the above considerations, the proposed structure of the Corda view address -is as follows: - -``` -operator = party1-rpc-address , [ ";" , { party2-rpc-address } ] , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] -operator = party1-alias , [ ";" , { party2-alias } ] , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] -operator = set-of-parties-alias , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] -``` - -Examples - -- `localhost:10006;localhost:10008#QueryStateByKey:myKeyName` -- `AliceNode;BobNode#QueryStateByKey:myKeyName` -- `AliceBobConsortium#QueryStateByKey:myKeyName` - -Where the relay driver holds a mapping of `AliceNode` and `BobNode` to -`localhost:10006` and `localhost:10007`, respectively. The relay driver also -knows how to map `AliceBobConsortium` to the RPC addresses of `AliceNode` and -`BobNode`. - -## Discovery of View Addresses - -Sharing a view address is a process done outside the interoperability protocol. -It is expected that either a fully qualified address is given to the requesting -network, e.g. - -``` -`AliceNode;BobNode#QueryStateByLinearId:b0b3e588-2569-403d-9209-abcb7a53814b` -``` - -Alternatively, the source network can provide a template and the destination -network can fill in the parameters from their own data. For example, the -provided template may be: - -``` -AliceNode;BobNode#QueryBillOfLadingByPurchaseOrderNumber: -``` - -Then, the destination network would use data it holds to make the request: - -``` -AliceNode;BobNode#QueryBillOfLadingByPurchaseOrderNumber:PO12345678 -``` - -## View Data Definition - -The `view` returned from a Corda network in response to a request from an -external network is represented as metadata and data, as described in the [view -definition](./definition.md). 
- -For the initial implementation of the interoperability CorDapp, the default proof -returned by the Corda network will be notarization, and the default -serialization format will be protobuf. The `data` field of the view will be a byte array of the following binary protobuf data: - -```protobuf -message ViewData { - message NotarizedPayload { - string signature = 1; - string certificate = 2; - string id = 3; - // Bytes of InteropPayload - bytes payload = 4; - } - repeated NotarizedPayload notarized_payloads = 1; -} -``` - -It consists of an array of `NotarizedPayload`, a response from each Corda node. The reason why we need to retain the payload field in each array element instead of keeping just one copy is because the data blobs within those different payloads may be different while being semantically identical (they may be different because of non-deterministic serialization and encryption operations carried out by the [interoperation module](../../models/infrastructure/interoperation-modules.md). - -The `NotarizedPayload.payload` field will have the serialized bytes of following structure: - -```protobuf -message InteropPayload { - // The result returned from the corda flow - bytes payload = 1; - // The full address string (i.e. address = location-segment , "/", ledger-segment "/" , view-segment) - string address = 2; -} -``` - -- `InteropPayload.payload` is the result that is returned from the application CorDapp flow that - is queried. The data in this field is flexible, and can be anything from a - single state, to an array of states or an arbitrary data type that is - calculated from computing derived state. The external network application that - receives the view will need to know how to parse this field and agreement on - format of the field needs to be agreed out-of-band. -- `NotarizedPayload.signature` is the signature of the node providing the view and proof. 
The - signature is signed on the result encoded as a Base64 bytearray of the JSON - stringified `data`. The signature is provided as a Base64 encoded string. -- `NotarizedPayload.certificate` is the X509 certificate of the node that contains the public key - corresponding to the private key that was used to sign the response. This is - used by the requesting network to verify the signature and authenticate the - identity of the signer based on the network's topology map. The certificate is - provided in PEM format as a Base64 encoded string. -- `NotarizedPayload.id` is the identity of the organisation that owns the node that did the signing -- `notarizations` is the list of all of the signatures and certificates from all - nodes that were required to endorse the request, as well as the id of the node that did the signing. - -### Examples - -`ViewData` - -``` -notarizations: [{ - signature: QbKxQqKlsLJH8MC6eOFhQ/ELful7lbkVrQTwm4Xmfg5xJXeNz8xtqv8any6H4jyXXskyFxYWLISosAfcUdd0BA==, - certificate: MIIBwjCCAVgAwIBAgIIUJkQvmKm35YwFAYIKoZIzj0EAwIGCCqGSM49AwEHMC8xCzAJBgNVBAYTAkdCMQ8wDQYDVQQHDAZMb25kb24xDzANBgNVBAoMBlBhcnR5QTAeFw0yMDA3MjQwMDAwMDBaFw0yNzA1MjAwMDAwMDBaMC8xCzAJBgNVBAYTAkdCMQ8wDQYDVQQHDAZMb25kb24xDzANBgNVBAoMBlBhcnR5QTAqMAUGAytlcAMhAMMKaREKhcTgSBMMzK81oPUSPoVmG/fJMLXq/ujSmse9o4GJMIGGMB0GA1UdDgQWBBRMXtDsKFZzULdQ3c2DCUEx3T1CUDAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIChDATBgNVHSUEDDAKBggrBgEFBQcDAjAfBgNVHSMEGDAWgBR4hwLuLgfIZMEWzG4n3AxwfgPbezARBgorBgEEAYOKYgEBBAMCAQYwFAYIKoZIzj0EAwIGCCqGSM49AwEHA0cAMEQCIC7J46SxDDz3LjDNrEPjjwP2prgMEMh7r/gJpouQHBk+AiA+KzXD0d5miI86D2mYK4C3tRli3X3VgnCe8COqfYyuQg==, - payload: - }] - -``` +initial implementation. + +### Locating the view. + +Corda provides an API for querying state from the vault - the Vault Query API. +The Vault Query API can be used directly by flows, and therefore CorDapps can +define flows that perform a particular vault query. 
Corda applications can +provide a flow name and set of required parameters (either as a template or +with known values) as an address for a view. This approach has the benefit of +providing a mechanism for not only addressing a single state as a view, but also +collections of states, or even computing derived state. + +The first point of contact with the Corda network from the relay driver is the +interoperation CorDapp that is installed on all nodes that wish to interoperate +with external networks. The relay driver forwards the request from the external +network to the interoperation CorDapp which first checks that the requesting +party has the required access control permission for the view address based on +flow name. It then attempts to call the corresponding flow that must also be +installed on the node. The node will run the query and return the view to the +initiating flow. Additional access control checks may need to be performed at +this point (for example, if access control was defined on a per-state level). +The initiating flow will then coordinate the signing of the response and +assembly of the view proof to be sent back to the relay driver. + +If the approach taken in the addressing of nodes required to endorse the view +was for the requesting network to specify a complete list, the interoperation +CorDapp will return the view and proof to the relay driver. The relay driver +will assemble responses from all nodes and return the set of responses back to +the requesting relay. + +If the approach for addressing required endorsing nodes was for just one +participating node to be specified, the interoperation CorDapp must determine +the list of required endorsers from the response returned from the application +CorDapp flow. For example, if a single state was returned, the interoperation +CorDapp will create a flow session for each participant listed in the state and +trigger the interoperation CorDapp flow in these nodes. 
The interoperation flow +in each node will perform the same steps as listed above (access control checks +for the flow, calling the application CorDapp flow, performing additional access +control checks on the returned view, assembling the view proof) and return the +view and proof response. The initiating interoperation flow will assemble the +responses from each node and return the set back to the relay driver. + +Part of view proof verification by the requesting network requires checking that +all views between endorsing nodes are consistent. This means that the view +returned from each node must be identical and deterministic. When a view is a +single state this is relatively trivial as all nodes should have the same +internal view of the state. However, careful consideration needs to be given when +querying aggregate or derived state. For example, if an external network wishes +to receive proof of total value of a set of assets held by a group of nodes, the +query must be addressed in such a way that all nodes will compute the result on +exactly the same set of assets. + +## Corda View Address + +Given the above considerations, the proposed structure of the Corda view address +is as follows: + +``` +operator = party1-rpc-address , [ ";" , { party2-rpc-address } ] , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] +operator = party1-alias , [ ";" , { party2-alias } ] , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] +operator = set-of-parties-alias , "#" , flow-name , [ ":" , { flow-arg1 } , [ ":" , { flow-arg2 } ]] +``` + +Examples + +- `localhost:10006;localhost:10008#QueryStateByKey:myKeyName` +- `AliceNode;BobNode#QueryStateByKey:myKeyName` +- `AliceBobConsortium#QueryStateByKey:myKeyName` + +Where the relay driver holds a mapping of `AliceNode` and `BobNode` to +`localhost:10006` and `localhost:10007`, respectively. 
The relay driver also +knows how to map `AliceBobConsortium` to the RPC addresses of `AliceNode` and +`BobNode`. + +## Discovery of View Addresses + +Sharing a view address is a process done outside the interoperability protocol. +It is expected that either a fully qualified address is given to the requesting +network, e.g. + +``` +`AliceNode;BobNode#QueryStateByLinearId:b0b3e588-2569-403d-9209-abcb7a53814b` +``` + +Alternatively, the source network can provide a template and the destination +network can fill in the parameters from their own data. For example, the +provided template may be: + +``` +AliceNode;BobNode#QueryBillOfLadingByPurchaseOrderNumber: +``` + +Then, the destination network would use data it holds to make the request: + +``` +AliceNode;BobNode#QueryBillOfLadingByPurchaseOrderNumber:PO12345678 +``` + +## View Data Definition + +The `view` returned from a Corda network in response to a request from an +external network is represented as metadata and data, as described in the [view +definition](./definition.md). + +For the initial implementation of the interoperability CorDapp, the default proof +returned by the Corda network will be notarization, and the default +serialization format will be protobuf. The `data` field of the view will be a byte array of the following binary protobuf data: + +```protobuf +message ViewData { + message NotarizedPayload { + string signature = 1; + string certificate = 2; + string id = 3; + // Bytes of InteropPayload + bytes payload = 4; + } + repeated NotarizedPayload notarized_payloads = 1; +} +``` + +It consists of an array of `NotarizedPayload`, a response from each Corda node. 
The reason why we need to retain the payload field in each array element instead of keeping just one copy is because the data blobs within those different payloads may be different while being semantically identical (they may be different because of non-deterministic serialization and encryption operations carried out by the [interoperation module](../../models/infrastructure/interoperation-modules.md). + +The `NotarizedPayload.payload` field will have the serialized bytes of following structure: + +```protobuf +message InteropPayload { + // The result returned from the corda flow + bytes payload = 1; + // The full address string (i.e. address = location-segment , "/", ledger-segment "/" , view-segment) + string address = 2; +} +``` + +- `InteropPayload.payload` is the result that is returned from the application CorDapp flow that + is queried. The data in this field is flexible, and can be anything from a + single state, to an array of states or an arbitrary data type that is + calculated from computing derived state. The external network application that + receives the view will need to know how to parse this field and agreement on + format of the field needs to be agreed out-of-band. +- `NotarizedPayload.signature` is the signature of the node providing the view and proof. The + signature is signed on the result encoded as a Base64 bytearray of the JSON + stringified `data`. The signature is provided as a Base64 encoded string. +- `NotarizedPayload.certificate` is the X509 certificate of the node that contains the public key + corresponding to the private key that was used to sign the response. This is + used by the requesting network to verify the signature and authenticate the + identity of the signer based on the network's topology map. The certificate is + provided in PEM format as a Base64 encoded string. 
+- `NotarizedPayload.id` is the identity of the organisation that owns the node that did the signing +- `notarizations` is the list of all of the signatures and certificates from all + nodes that were required to endorse the request, as well as the id of the node that did the signing. + +### Examples + +`ViewData` + +``` +notarizations: [{ + signature: QbKxQqKlsLJH8MC6eOFhQ/ELful7lbkVrQTwm4Xmfg5xJXeNz8xtqv8any6H4jyXXskyFxYWLISosAfcUdd0BA==, + certificate: MIIBwjCCAVgAwIBAgIIUJkQvmKm35YwFAYIKoZIzj0EAwIGCCqGSM49AwEHMC8xCzAJBgNVBAYTAkdCMQ8wDQYDVQQHDAZMb25kb24xDzANBgNVBAoMBlBhcnR5QTAeFw0yMDA3MjQwMDAwMDBaFw0yNzA1MjAwMDAwMDBaMC8xCzAJBgNVBAYTAkdCMQ8wDQYDVQQHDAZMb25kb24xDzANBgNVBAoMBlBhcnR5QTAqMAUGAytlcAMhAMMKaREKhcTgSBMMzK81oPUSPoVmG/fJMLXq/ujSmse9o4GJMIGGMB0GA1UdDgQWBBRMXtDsKFZzULdQ3c2DCUEx3T1CUDAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIChDATBgNVHSUEDDAKBggrBgEFBQcDAjAfBgNVHSMEGDAWgBR4hwLuLgfIZMEWzG4n3AxwfgPbezARBgorBgEEAYOKYgEBBAMCAQYwFAYIKoZIzj0EAwIGCCqGSM49AwEHA0cAMEQCIC7J46SxDDz3LjDNrEPjjwP2prgMEMh7r/gJpouQHBk+AiA+KzXD0d5miI86D2mYK4C3tRli3X3VgnCe8COqfYyuQg==, + payload: + }] + +``` diff --git a/weaver/rfcs/formats/views/ethereum.md b/weaver/rfcs/formats/views/ethereum.md index a351ef7b3b..63de8b17b0 100644 --- a/weaver/rfcs/formats/views/ethereum.md +++ b/weaver/rfcs/formats/views/ethereum.md @@ -3,31 +3,31 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Ethereum Views +# Ethereum Views - RFC: 03-006 - Authors: Ermyas Abebe - Status: Draft - Since: 13-Aug-2020 -## Addressing an Ethereum View - -``` -operator = contract-address , function-selector , { argument } -``` - -Example: - -* Contract Address: 0x51c9c2475f106fd6bed2bd45824a9ab5b0d24113 -* Function Selector: 0xcdcd77c0 -* Argument 1: 0x0000000000000000000000000000000000000000000000000000000000000045 -* Argument 2: 0x0000000000000000000000000000000000000000000000000000000000000001 - -``` -operator = 
0x51c9c2475f106fd6bed2bd45824a9ab5b0d24113:0xcdcd77c00x00000000000000000000000000000000000000000000000000000000000000450x0000000000000000000000000000000000000000000000000000000000000001 -``` - -## View Data Definition - -### Examples - +## Addressing an Ethereum View + +``` +operator = contract-address , function-selector , { argument } +``` + +Example: + +* Contract Address: 0x51c9c2475f106fd6bed2bd45824a9ab5b0d24113 +* Function Selector: 0xcdcd77c0 +* Argument 1: 0x0000000000000000000000000000000000000000000000000000000000000045 +* Argument 2: 0x0000000000000000000000000000000000000000000000000000000000000001 + +``` +operator = 0x51c9c2475f106fd6bed2bd45824a9ab5b0d24113:0xcdcd77c00x00000000000000000000000000000000000000000000000000000000000000450x0000000000000000000000000000000000000000000000000000000000000001 +``` + +## View Data Definition + +### Examples + diff --git a/weaver/rfcs/models/identity/design-choices.md b/weaver/rfcs/models/identity/design-choices.md index 3585bfb651..b044eba3bb 100644 --- a/weaver/rfcs/models/identity/design-choices.md +++ b/weaver/rfcs/models/identity/design-choices.md @@ -3,109 +3,109 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Design Choices for Identity Management - +# Design Choices for Identity Management + - RFC: 01-011-appendix -- Authors: Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna +- Authors: Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna - Status: Proposed -- Since: 21-Jan-2021 - -## Summary - -- Permissioned networks rely on the identity of its members for validating state. -- Membership credentials, in the form of digital certificates, are issued by trust authorities local to the network. -- Remote agents must be in possession of the necesary identity information in order to verify attestations from a network. -- There are multiple approaches to sharing network identity with external agents, each having differenet trade-offs. 
- -## Point-to-Point Identity Exchange - -The simplest approach to sharing identity information is via a bilateral exchange. This approach makes sense as initial step when enabling interoperability across two networks. No additional machinery is required beyond shairng a signed package of identities. - - - -- An external agent requests a network for a signed packaged of identities. -- The signing parties form the trust root for providing valid identity sets. - - The signing parties can be a small subset of the network, such as the set of consortium members. -- Remote agents use the known trust root to verify signed packages. - - We assume here that this trust root is relatively static and known apriori. -- The distribution of signed packages can be done in different ways: - - Made available upon request. - - Made available via a REST endpoint or the consortium website. - - Made available through the network's relay service via a configuration protocol. - -### Additional Considerations - -- Updating and notifying or distributing new packages when the network configuration changes. -- Remote networks may need to detect or be notified of potential identity revocations. -- Remote networks may retrieve recent member information when unknown identities are detected. - - -## Well-Known Registry for Identity Exchange - -When multiple networks are involved, sharing identity information across each relationship is expensive. A well-known registry can be used to publish and share identity information. Networks query the registry for relevant identity information of those they intend to interoperate with. - - - -- A well-known registry is used to publish and share network identity information. - - The registry can be public or limit access to a group of networks, such as those part of an ecosystem. - - The registry can be a centralized web service or database, or decentralized. 
-- Remote networks source identity information via the registry -- Root of trust can be: - - The registry service - - A signing trust root from the source network - -### Additional Considerations - -- Updating registry information when the network configuration changes. -- Remote networks may need to detect or be notified of potential identity revocations. -- Remote networks may retrieve recent member information when unknown identities are detected. - -## DID Registry for Identity Exchange - -An alternative to using a general-purpose registry of information is a DID registry. Networks in this instance are identified by a network DID, owned by the consortium. - - - -- A DID registry is used to register network DIDs. -- The network DID is owned by a trust root of the source network. - - Trust root can be small subset of members (e.g. consortium members) - - Ownership can be determined using different signature schemes, such as multisig and threshold signatures. -- The network DID is made publicly available. - - For example, through the consortium website. -- Remote networks retrieve and verify the DID doc corressponding to the network DID. -- The DID doc contains member information. - - The member certificates can be directly encoded in the DID doc. - - Alternatively, DID doc contains location and hash of packaged certificates. - -### Additional Considerations - -- Updating member information in DID doc when the network configuration changes. -- Remote networks may need to detect or be notified of potential identity revocations. -- Remote networks may retrieve recent DID doc when unknown identities are detected. - - -## Verinyms for Identity Exchange - -Network verinyms as a means to identify a legal entity and its participation in one or more networks is an improvement on using network DIDs alone. In addition to identifying a network and its members, this approach allows legal entities to prove membership in one or more networks. 
- - -- The trust anchors of a DID registry registers network verinyms. -- A network verinym is owned by the trust root of the source network - - The trust root can be small subset of members (e.g. consortium members) - - Ownership can be determined using different signature schemes, such as multisig and threshold signatures. -- The network verinym is made publicly available. - - For example, through the consortium website. -- Remote networks retrieve and verify the DID doc corressponding to the network verinym. -- The DID doc contains member verinyms. -- Member verinyms are resolved and used to retrieve underlying network membership certificates. -- Members can also prove network membership by presenting membership credentials. - -### Additional Considerations - -- Updating the network verinym's member list when the network configuration changes. -- Remote networks may need to detect or be notified of potential identity revocations. -- Remote networks may retrieve recent DID doc when unknown identities are detected. -- Reducing the number of messages exchanged between networks when resolving the underlying identity certificates. - - +- Since: 21-Jan-2021 + +## Summary + +- Permissioned networks rely on the identity of its members for validating state. +- Membership credentials, in the form of digital certificates, are issued by trust authorities local to the network. +- Remote agents must be in possession of the necessary identity information in order to verify attestations from a network. +- There are multiple approaches to sharing network identity with external agents, each having different trade-offs. + +## Point-to-Point Identity Exchange + +The simplest approach to sharing identity information is via a bilateral exchange. This approach makes sense as an initial step when enabling interoperability across two networks. No additional machinery is required beyond sharing a signed package of identities. 
+ + + +- An external agent requests a network for a signed packaged of identities. +- The signing parties form the trust root for providing valid identity sets. + - The signing parties can be a small subset of the network, such as the set of consortium members. +- Remote agents use the known trust root to verify signed packages. + - We assume here that this trust root is relatively static and known apriori. +- The distribution of signed packages can be done in different ways: + - Made available upon request. + - Made available via a REST endpoint or the consortium website. + - Made available through the network's relay service via a configuration protocol. + +### Additional Considerations + +- Updating and notifying or distributing new packages when the network configuration changes. +- Remote networks may need to detect or be notified of potential identity revocations. +- Remote networks may retrieve recent member information when unknown identities are detected. + + +## Well-Known Registry for Identity Exchange + +When multiple networks are involved, sharing identity information across each relationship is expensive. A well-known registry can be used to publish and share identity information. Networks query the registry for relevant identity information of those they intend to interoperate with. + + + +- A well-known registry is used to publish and share network identity information. + - The registry can be public or limit access to a group of networks, such as those part of an ecosystem. + - The registry can be a centralized web service or database, or decentralized. +- Remote networks source identity information via the registry +- Root of trust can be: + - The registry service + - A signing trust root from the source network + +### Additional Considerations + +- Updating registry information when the network configuration changes. +- Remote networks may need to detect or be notified of potential identity revocations. 
+- Remote networks may retrieve recent member information when unknown identities are detected. + +## DID Registry for Identity Exchange + +An alternative to using a general-purpose registry of information is a DID registry. Networks in this instance are identified by a network DID, owned by the consortium. + + + +- A DID registry is used to register network DIDs. +- The network DID is owned by a trust root of the source network. + - Trust root can be small subset of members (e.g. consortium members) + - Ownership can be determined using different signature schemes, such as multisig and threshold signatures. +- The network DID is made publicly available. + - For example, through the consortium website. +- Remote networks retrieve and verify the DID doc corresponding to the network DID. +- The DID doc contains member information. + - The member certificates can be directly encoded in the DID doc. + - Alternatively, DID doc contains location and hash of packaged certificates. + +### Additional Considerations + +- Updating member information in DID doc when the network configuration changes. +- Remote networks may need to detect or be notified of potential identity revocations. +- Remote networks may retrieve recent DID doc when unknown identities are detected. + + +## Verinyms for Identity Exchange + +Network verinyms as a means to identify a legal entity and its participation in one or more networks is an improvement on using network DIDs alone. In addition to identifying a network and its members, this approach allows legal entities to prove membership in one or more networks. + + + +- The trust anchors of a DID registry register network verinyms. +- A network verinym is owned by the trust root of the source network + - The trust root can be small subset of members (e.g. consortium members) + - Ownership can be determined using different signature schemes, such as multisig and threshold signatures. +- The network verinym is made publicly available. 
+ - For example, through the consortium website. +- Remote networks retrieve and verify the DID doc corressponding to the network verinym. +- The DID doc contains member verinyms. +- Member verinyms are resolved and used to retrieve underlying network membership certificates. +- Members can also prove network membership by presenting membership credentials. + +### Additional Considerations + +- Updating the network verinym's member list when the network configuration changes. +- Remote networks may need to detect or be notified of potential identity revocations. +- Remote networks may retrieve recent DID doc when unknown identities are detected. +- Reducing the number of messages exchanged between networks when resolving the underlying identity certificates. + + diff --git a/weaver/rfcs/models/ledger/observation-of-state.md b/weaver/rfcs/models/ledger/observation-of-state.md index 1fb19f66d3..8148ec6014 100644 --- a/weaver/rfcs/models/ledger/observation-of-state.md +++ b/weaver/rfcs/models/ledger/observation-of-state.md @@ -3,45 +3,45 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Verifiable Observation of State - +# Verifiable Observation of State + - RFC: 01-003 -- Authors: Allison Irvin, Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna -- Status: Draft -- Since: 13-Aug-2020 - -## Summary - -* An observer, also called a remote agent or external client, is a non-participant of a ledger - they are external to a committee or community maintaining a ledger. -* Observers can however receive state that can be verified -* The ability to observe and verify state on remote ledgers is the basis for desiging an interoperable protocol. - -## Observers are Non-Participants - - -Neither run full nodes nor have a valid identity recognized by the maintainers of a ledger. 
-Observers differ from participants of a ledger, including those who don't run fulls nodes, in the following ways: -* Observers don't run full nodes -* Observers may not have complete knowledge of all maintainers of state. -* Observers may not have knowledge of policies governing the state. -* Observers don't participate in the governance process. -* Unlike internal participants, observers are not signatories to state. - -### Required Further Discussion - -* Internal client, in the following, implies that no member of the client's org maintains a full-node. -* Perhaps there is little use in drawing a distinction between observers and internal clients who don't run full nodes. -* If the ledger is public, the two are identitical. -* If the ledger is permissioned: - * The maintainers of state control access and visibility of state to both internal clients and observers. - * Observers have identities, just like the internal counterparts, just not issued by the maintainers. - * Observers must be carefully vetted just like internal clients. -* Internal clients exist to transact on the network, i.e. they are signatories of state concerning them. -* State changes by observers are carried out by a participant, on behalf of the observer. +- Authors: Allison Irvin, Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna +- Status: Draft +- Since: 13-Aug-2020 + +## Summary + +* An observer, also called a remote agent or external client, is a non-participant of a ledger - they are external to a committee or community maintaining a ledger. +* Observers can however receive state that can be verified +* The ability to observe and verify state on remote ledgers is the basis for desiging an interoperable protocol. + +## Observers are Non-Participants + + +Neither run full nodes nor have a valid identity recognized by the maintainers of a ledger. 
+Observers differ from participants of a ledger, including those who don't run full nodes, in the following ways: +* Observers don't run full nodes +* Observers may not have complete knowledge of all maintainers of state. +* Observers may not have knowledge of policies governing the state. +* Observers don't participate in the governance process. +* Unlike internal participants, observers are not signatories to state. + +### Required Further Discussion + +* Internal client, in the following, implies that no member of the client's org maintains a full-node. +* Perhaps there is little use in drawing a distinction between observers and internal clients who don't run full nodes. +* If the ledger is public, the two are identical. +* If the ledger is permissioned: + * The maintainers of state control access and visibility of state to both internal clients and observers. + * Observers have identities, just like the internal counterparts, just not issued by the maintainers. + * Observers must be carefully vetted just like internal clients. +* Internal clients exist to transact on the network, i.e. they are signatories of state concerning them. +* State changes by observers are carried out by a participant, on behalf of the observer. * The distinction gets blurry if networks are based entirely on an identity system such as SSI. - - -## "Destination" Networks are Observers - -* TODO: We need a better term than "destination" (e.g. consumer/consuming network) -* Destination networks are observers with access to views projected by a ("source") network. + + +## "Destination" Networks are Observers + +* TODO: We need a better term than "destination" (e.g. consumer/consuming network) +* Destination networks are observers with access to views projected by a ("source") network. 
diff --git a/weaver/rfcs/models/security/security-domains.md b/weaver/rfcs/models/security/security-domains.md index 18d20591cd..255df476f8 100644 --- a/weaver/rfcs/models/security/security-domains.md +++ b/weaver/rfcs/models/security/security-domains.md @@ -11,21 +11,21 @@ - Since: 13-Aug-2020 ## Security Domains: Summary - -Security domains are defined on external entities such as a thing, person, organization or on groups of entities such as networks. Security domains assemble a set of policies that are applied to a variety of interactions with a given external entity. These can include access control policies that are applied on requests to read state on the ledger or verification policies applied on presentation of a proof of state by the external entity. - -A security domain is an abstract concept used to group a set of members to various policies. The relation between the various concepts is shown in the image below. - - - -### Members and Traits of Security Domains - -The members of a security domain are represented by a set of public keys, certificates or certificate authorities. This allows security domains to be defined over a domain of validators in a permissionless network, a domain of known entities in a club, an organization, or a group of organizations representing a permissioned network. - -It is also possible for the members of a security domain to be empty, implying that any interaction with the external entity doesn't involve validation of membership. This is useful in situtations when the external entitiy is known by other non-identifiable traits, such as the _difficulty target_ in the Bitcoin network, zero-knowledge credentials (e.g. a bank in Australia), knowledge of some secret, or a commitment. - -### Policies - -The application of policies helps guard against unauthorized access to state or presentation of proofs that are either invalid or don't meet an expected integrity criteria. 
- -## Securing the Relay + +Security domains are defined on external entities such as a thing, person, organization or on groups of entities such as networks. Security domains assemble a set of policies that are applied to a variety of interactions with a given external entity. These can include access control policies that are applied on requests to read state on the ledger or verification policies applied on presentation of a proof of state by the external entity. + +A security domain is an abstract concept used to group a set of members to various policies. The relation between the various concepts is shown in the image below. + + + +### Members and Traits of Security Domains + +The members of a security domain are represented by a set of public keys, certificates or certificate authorities. This allows security domains to be defined over a domain of validators in a permissionless network, a domain of known entities in a club, an organization, or a group of organizations representing a permissioned network. + +It is also possible for the members of a security domain to be empty, implying that any interaction with the external entity doesn't involve validation of membership. This is useful in situations when the external entity is known by other non-identifiable traits, such as the _difficulty target_ in the Bitcoin network, zero-knowledge credentials (e.g. a bank in Australia), knowledge of some secret, or a commitment. + +### Policies + +The application of policies helps guard against unauthorized access to state or presentation of proofs that are either invalid or don't meet an expected integrity criteria. 
+ +## Securing the Relay diff --git a/weaver/rfcs/protocols/asset-exchange/generic-htlc.md b/weaver/rfcs/protocols/asset-exchange/generic-htlc.md index 661b7bb168..ff44c0f012 100644 --- a/weaver/rfcs/protocols/asset-exchange/generic-htlc.md +++ b/weaver/rfcs/protocols/asset-exchange/generic-htlc.md @@ -1,86 +1,86 @@ - -# Asset Exchange - HTLC - -- RFC: 02-004 -- Authors: Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna, Sandeep Nishad, Krishnasuri Narayanam, Dhinakaran Vinayagamurthy -- Status: Proposed -- Since: 25-Nov-2020 - -## Summary - -* Asset exchange protocols allow parties to trade assets residing in different ledgers. -* Hash Time Locked Contract (HTLC) is a pattern for enabling asset exchanges without placing trust in a central party. -* HTLCs are atomic in nature, in that, either a deal executes and parties receive the assets they wanted, or parties retain their original assets. - -## Atomic Swaps with Hash Time Locked Contracts - -Hash Time Locked Contracts (HTLCs) is a non-custodial approach to exchanging assets between two or more parties. Although HTLCs do have some drawbacks in their construction, they are simple enough to address a number of real-world exchange scenarios. - -The following figure describes the flow of a two-party exchange. - - - -Alice and Bob agree on a deal off-chain to exchange assets A for B. In practice assets A and B could be fungible or non-fungile and the deal is based on their fair value. Alice and Bob also agree on the refund period for the deal and decide who will go first. The process for exchanging the assets then proceeds as follows. - -1. Alice creates an agreement on *Ledger A*, locking her asset using the hash *H(s)* of a secret *s* for the period *2t*. The secret is only known to Alice at this stage. -2. Upon inspection, Bob creates a corresponding agreement on *Ledger B* locking his asset using the same hash shared by Alice. The refund period is set to *t*. -3. 
Alice inspects Bob's agreement before proceeding. She verifies the hash *H(s)* and period *t*. -4. Alice attempts to claims Bob's asset using secret *s*. If the hash of *s* matches *H(s)* in the agreement and the current time is less than *t*, the asset is unlocked and transferred to Alice. -5. Bob, observing the secret shared by Alice on *Ledger B*, now attempts to claim Alice's asset using secret *s*. If hash of secret *s* matches *H(s)* and the current time is less than *2t* the asset is unlocked and transferred to Bob. - -In the event Alice fails to claim Bob's asset before the refund period *t* elapses, Bob can reclaim his asset any time after. Similarly, Alice can reclaim her asset any time after *2t*. - - -## Overview of Protocol Flow - - - - -## Protocol Messages - - - - - -## Open Questions - -* Can the coordination and agreement on the deal, which currently happens off-chain, be coordinated by the protocol? -* Do we assume that the application, acting on behalf of Alice and Bob, is always available and that a deal never partially fail? - * Given this is a permissioned network, on the rare chance that an application is down after counter party has claimed asset, the failure to claim can be resolved through manual procedures involving the management committees and proving of counter party claims. -* The relay and driver plays no special role besides communicating flow of messages. -* Alternatively, the application layer can coordiante the whole swap instead of involving the relay / drivers. 
- - -## Additional Notes - -* Alice sets up deal on-chain by locking asset - * Asset owned by Alice - * Image of secret (hash-lock) - * Duration till asset can be redeemed - * Bob as counter-party - * Address of counter party's asset - * Deal ID -* Creation of deal generates event that is propagated to destination network -* Bob creats corresponding deal using event data on destination network - * Asset owned by Bob - * Image of secret - * Duration till asset can be redeemed - * Alice as counter-party - * Address of counter party's asset - * Deal ID (matches with deal ID in origin network) -* Scenario 1: Alice claims Bob's asset on destination network - * Submits secret - * Secret is propagated to origin network - * Automatic claim executed. - * Possible failure if message is lost. - * Bob claims Alice's asset using secret. -* Scenario 2: Time lock expires - * Bob reclaims asset in destination network - * Alice reclaims asset in source network - * Alternatively, automatic reclaiming by driver or application - - + +# Asset Exchange - HTLC + +- RFC: 02-004 +- Authors: Dileban Karunamoorthy, Ermyas Abebe, Venkatraman Ramakrishna, Sandeep Nishad, Krishnasuri Narayanam, Dhinakaran Vinayagamurthy +- Status: Proposed +- Since: 25-Nov-2020 + +## Summary + +* Asset exchange protocols allow parties to trade assets residing in different ledgers. +* Hash Time Locked Contract (HTLC) is a pattern for enabling asset exchanges without placing trust in a central party. +* HTLCs are atomic in nature, in that, either a deal executes and parties receive the assets they wanted, or parties retain their original assets. + +## Atomic Swaps with Hash Time Locked Contracts + +Hash Time Locked Contracts (HTLCs) is a non-custodial approach to exchanging assets between two or more parties. Although HTLCs do have some drawbacks in their construction, they are simple enough to address a number of real-world exchange scenarios. + +The following figure describes the flow of a two-party exchange. 
+ + + +Alice and Bob agree on a deal off-chain to exchange assets A for B. In practice assets A and B could be fungible or non-fungible and the deal is based on their fair value. Alice and Bob also agree on the refund period for the deal and decide who will go first. The process for exchanging the assets then proceeds as follows. + +1. Alice creates an agreement on *Ledger A*, locking her asset using the hash *H(s)* of a secret *s* for the period *2t*. The secret is only known to Alice at this stage. +2. Upon inspection, Bob creates a corresponding agreement on *Ledger B* locking his asset using the same hash shared by Alice. The refund period is set to *t*. +3. Alice inspects Bob's agreement before proceeding. She verifies the hash *H(s)* and period *t*. +4. Alice attempts to claim Bob's asset using secret *s*. If the hash of *s* matches *H(s)* in the agreement and the current time is less than *t*, the asset is unlocked and transferred to Alice. +5. Bob, observing the secret shared by Alice on *Ledger B*, now attempts to claim Alice's asset using secret *s*. If hash of secret *s* matches *H(s)* and the current time is less than *2t* the asset is unlocked and transferred to Bob. + +In the event Alice fails to claim Bob's asset before the refund period *t* elapses, Bob can reclaim his asset any time after. Similarly, Alice can reclaim her asset any time after *2t*. + + +## Overview of Protocol Flow + + + + +## Protocol Messages + + + + + +## Open Questions + +* Can the coordination and agreement on the deal, which currently happens off-chain, be coordinated by the protocol? +* Do we assume that the application, acting on behalf of Alice and Bob, is always available and that a deal never partially fail? + * Given this is a permissioned network, on the rare chance that an application is down after counter party has claimed asset, the failure to claim can be resolved through manual procedures involving the management committees and proving of counter party claims. 
+* The relay and driver plays no special role besides communicating flow of messages. +* Alternatively, the application layer can coordinate the whole swap instead of involving the relay / drivers. + + +## Additional Notes + +* Alice sets up deal on-chain by locking asset + * Asset owned by Alice + * Image of secret (hash-lock) + * Duration till asset can be redeemed + * Bob as counter-party + * Address of counter party's asset + * Deal ID +* Creation of deal generates event that is propagated to destination network +* Bob creates corresponding deal using event data on destination network + * Asset owned by Bob + * Image of secret + * Duration till asset can be redeemed + * Alice as counter-party + * Address of counter party's asset + * Deal ID (matches with deal ID in origin network) +* Scenario 1: Alice claims Bob's asset on destination network + * Submits secret + * Secret is propagated to origin network + * Automatic claim executed. + * Possible failure if message is lost. + * Bob claims Alice's asset using secret. +* Scenario 2: Time lock expires + * Bob reclaims asset in destination network + * Alice reclaims asset in source network + * Alternatively, automatic reclaiming by driver or application + + diff --git a/weaver/rfcs/protocols/events/event-bus.md b/weaver/rfcs/protocols/events/event-bus.md index 3ac1caf7fa..c43a88728e 100644 --- a/weaver/rfcs/protocols/events/event-bus.md +++ b/weaver/rfcs/protocols/events/event-bus.md @@ -3,117 +3,117 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Event Bus +# Event Bus - RFC: 02-016 - Authors: Ermyas Abebe, Dileban Karunamoorthy - Status: Draft - Since: 01-Dec-2020 -## Summary - -* The event bus is a protocol for end-to-end event delivery between distributed ledgers. -* The protocol enables external agents to subscribe to events of interest from remote ledgers. -* The protocol delivers events to all authorized agents using a choice of security and delivery guarantees. -* Events can be observed by zero or more agents. 
- -## Types of Events - -See specification on [events](../models/events.md) for event types. - - - -## Event Capture and Delivery - -### Application and System Events - -* Relay drivers subscribe to all events generated by a network. These include system and application events. -* The events are filtered based filtering rules defined in the Weaver Dapp. -* When matches are found, the event is forwarded to all event subscribers registered in the Weaver Dapp. -* Defining matching rules for application events might be challenging. - * E.g. filtering events generated by Dapps based on object ID or topic name is hard if there is no standard for publishing events. -* Drivers must be granted appropriate permissions to listen to events from the network. - * Fine grained control of which application events are visible to drivers might be hard to achieve. -* Drivers must maintain additional state locally in order to provide delivery guarantees (e.g at least once, sequencing, etc). - - - - -### Weaver Events - -* The driver only monitors the Weaver Dapp for new events. -* Weaver events are events registered by applications directly with the Weaver Dapp. -* Weaver events can be part of the world state or simply emited during commit time. - * If events are managed by the ledger, drivers can be stateless. - * State for ensuring delivery guarantees, sequencing etc can be tracked on the ledger. -* The data model for Weaver events are easier to enforce, which applications must comply with. - - - -## Event Proofs - -The role of events can be thought of in two complementary ways with different trade-offs. - -* Events are non-critical side-effects of execution that are useful for trigering external actions. -* Events are a way to communicate critical information to continue a larger work flow. - -The trade-offs come in the form of the cost to generate proofs. - -* An event can be delivered as is, signed by the driver/relay. - * Least cost, has little impact on the network. 
-* An event can be accompanied by a proof derived from the block that produced the proof. - * Example: a merkle proof based on a signed block header. - * Might not be applicable to all DLTs (e.g. a Fabric transaction's validity requires further proof). - * Lower cost, little impact on the network*. -* An event can be signed by mulitple organizations. - * Signatures requested explicitly from a set of orgs. - * Stateful (Weaver) events where signatures are acquired through standard queries. - * Cost is no different to generating standard data proofs - -## Delivery Gurantees - -Delivery guarantees can differ depending on whether events are critical or non-critical. - -* At most once - * Fire and forget - * Suitable for non-critical events -* At least once - * Critical events -* Exactly once - * Critical events - * Can be costlier to implement - -## Event Subscription Protocol - -Discuss protocol for registering and receiving events. - -* Permissioned networks restrict visibility of a ledger's state to its members. -* This includes visibility on application, system and Weaver events. -* Access to a ledger's events by an external agent must be explicitly granted (similar to rules when sharing state). -* How do external agents learn about events to subscrible to. -* Format of event subscription messages. -* Authorization by network after subscribing to events. -* Registering topics and subscribers. - * Events belong to topics which remote ledgers can subscribe to. Allow one or more subscribers per topic. - * E.g. Multiple subsribers: remote ledgers interested in a sanctions list. - * E.g. Single subscriber: a one-to-one relationship exists between business objects on the origin and destination ledger. -* Push vs pull models - * Push model. - -### Policies to Prevent Flooding or Spam - -* Receiving relays might need to define policies in order to prevent potential spam after registering with a remote ledger. 
-* Policies can rate limit events, restrict events up to a certain size, etc. - -## Anatomy of an Event - -* Message structure for subscribing to events -* Message structure encapsulating and communicating events -* Event representation - * A DLT-neutral standard helps here - * Encapsulate DLT event in network neutral standard with useful metadata - - - - - +## Summary + +* The event bus is a protocol for end-to-end event delivery between distributed ledgers. +* The protocol enables external agents to subscribe to events of interest from remote ledgers. +* The protocol delivers events to all authorized agents using a choice of security and delivery guarantees. +* Events can be observed by zero or more agents. + +## Types of Events + +See specification on [events](../models/events.md) for event types. + + + +## Event Capture and Delivery + +### Application and System Events + +* Relay drivers subscribe to all events generated by a network. These include system and application events. +* The events are filtered based filtering rules defined in the Weaver Dapp. +* When matches are found, the event is forwarded to all event subscribers registered in the Weaver Dapp. +* Defining matching rules for application events might be challenging. + * E.g. filtering events generated by Dapps based on object ID or topic name is hard if there is no standard for publishing events. +* Drivers must be granted appropriate permissions to listen to events from the network. + * Fine grained control of which application events are visible to drivers might be hard to achieve. +* Drivers must maintain additional state locally in order to provide delivery guarantees (e.g at least once, sequencing, etc). + + + + +### Weaver Events + +* The driver only monitors the Weaver Dapp for new events. +* Weaver events are events registered by applications directly with the Weaver Dapp. +* Weaver events can be part of the world state or simply emited during commit time. 
+ * If events are managed by the ledger, drivers can be stateless. + * State for ensuring delivery guarantees, sequencing etc can be tracked on the ledger. +* The data model for Weaver events are easier to enforce, which applications must comply with. + + + +## Event Proofs + +The role of events can be thought of in two complementary ways with different trade-offs. + +* Events are non-critical side-effects of execution that are useful for triggering external actions. +* Events are a way to communicate critical information to continue a larger work flow. + +The trade-offs come in the form of the cost to generate proofs. + +* An event can be delivered as is, signed by the driver/relay. + * Least cost, has little impact on the network. +* An event can be accompanied by a proof derived from the block that produced the proof. + * Example: a merkle proof based on a signed block header. + * Might not be applicable to all DLTs (e.g. a Fabric transaction's validity requires further proof). + * Lower cost, little impact on the network*. +* An event can be signed by multiple organizations. + * Signatures requested explicitly from a set of orgs. + * Stateful (Weaver) events where signatures are acquired through standard queries. + * Cost is no different to generating standard data proofs + +## Delivery Guarantees + +Delivery guarantees can differ depending on whether events are critical or non-critical. + +* At most once + * Fire and forget + * Suitable for non-critical events +* At least once + * Critical events +* Exactly once + * Critical events + * Can be costlier to implement + +## Event Subscription Protocol + +Discuss protocol for registering and receiving events. + +* Permissioned networks restrict visibility of a ledger's state to its members. +* This includes visibility on application, system and Weaver events. +* Access to a ledger's events by an external agent must be explicitly granted (similar to rules when sharing state). 
+* How do external agents learn about events to subscribe to. +* Format of event subscription messages. +* Authorization by network after subscribing to events. +* Registering topics and subscribers. + * Events belong to topics which remote ledgers can subscribe to. Allow one or more subscribers per topic. + * E.g. Multiple subscribers: remote ledgers interested in a sanctions list. + * E.g. Single subscriber: a one-to-one relationship exists between business objects on the origin and destination ledger. +* Push vs pull models + * Push model. + +### Policies to Prevent Flooding or Spam + +* Receiving relays might need to define policies in order to prevent potential spam after registering with a remote ledger. +* Policies can rate limit events, restrict events up to a certain size, etc. + +## Anatomy of an Event + +* Message structure for subscribing to events +* Message structure encapsulating and communicating events +* Event representation + * A DLT-neutral standard helps here + * Encapsulate DLT event in network neutral standard with useful metadata + + + + + diff --git a/weaver/rfcs/terminology.md b/weaver/rfcs/terminology.md index 252b0a818b..fe5502e98e 100644 --- a/weaver/rfcs/terminology.md +++ b/weaver/rfcs/terminology.md @@ -3,34 +3,34 @@ SPDX-License-Identifier: CC-BY-4.0 --> -# Terminology - -**External Client** - -Or Remote Client, Observer, Remote Agent? - -**Destination Network** - -TBC (term needs to change) - -**Driver** - -... - -**IoP Modules** - -What is a good general term for the modules we deploy on ledgers/networks to support interoperability? - -(e.g. InteropCorDapp, fnimcc/snamcc, etc) - -**Relay** - -... - -**Security Group** - -... - -**Source Network** - -TBC (term needs to change) +# Terminology + +**External Client** + +Or Remote Client, Observer, Remote Agent? + +**Destination Network** + +TBC (term needs to change) + +**Driver** + +... 
+ +**IoP Modules** + +What is a good general term for the modules we deploy on ledgers/networks to support interoperability? + +(e.g. InteropCorDapp, fnimcc/snamcc, etc) + +**Relay** + +... + +**Security Group** + +... + +**Source Network** + +TBC (term needs to change) diff --git a/weaver/sdks/corda/gradlew.bat b/weaver/sdks/corda/gradlew.bat index 24467a141f..9618d8d960 100644 --- a/weaver/sdks/corda/gradlew.bat +++ b/weaver/sdks/corda/gradlew.bat @@ -1,100 +1,100 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
-echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. 
+set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/weaver/sdks/corda/src/main/kotlin/com/weaver/corda/sdk/CredentialsExtractor.java b/weaver/sdks/corda/src/main/kotlin/com/weaver/corda/sdk/CredentialsExtractor.java index 597bbf496f..e7d2eb9e1d 100644 --- a/weaver/sdks/corda/src/main/kotlin/com/weaver/corda/sdk/CredentialsExtractor.java +++ b/weaver/sdks/corda/src/main/kotlin/com/weaver/corda/sdk/CredentialsExtractor.java @@ -1,354 +1,354 @@ -/* - * Copyright IBM Corp. All Rights Reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -package com.weaver.corda.sdk; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.security.InvalidKeyException; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.Base64; -import java.util.Enumeration; -import java.util.Vector; - -import javax.naming.InvalidNameException; -import javax.naming.ldap.LdapName; -import javax.naming.ldap.Rdn; - -import org.bouncycastle.openssl.jcajce.JcaPEMWriter; - -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; - -/** - * @author VENKATRAMANRAMAKRISH - * - */ -public class CredentialsExtractor { - - private static boolean isSelfSignedCertificate(X509Certificate xcert) { - if (xcert == null) throw new NullPointerException("No certificate provided"); - if (!xcert.getIssuerX500Principal().getName().equals(xcert.getSubjectX500Principal().getName())) { - return false; - } - try { - xcert.verify(xcert.getPublicKey()); - } catch (InvalidKeyException | CertificateException | NoSuchAlgorithmException | NoSuchProviderException - | SignatureException e) { - e.printStackTrace(); - return false; - } - return true; - } - - // Assume that the chain rises up in the hierarchy from low to high index - private static boolean isValidCertificateChain(X509Certificate xcerts[]) { - for (int i = 0 ; i< xcerts.length ; i++) { - if (i == xcerts.length - 1) { - return isSelfSignedCertificate(xcerts[i]); - } else { - if 
(!xcerts[i].getIssuerX500Principal().getName().equals(xcerts[i + 1].getSubjectX500Principal().getName())) { - return false; - } - try { - xcerts[i].verify(xcerts[i + 1].getPublicKey()); - } catch (InvalidKeyException | CertificateException | NoSuchAlgorithmException | NoSuchProviderException - | SignatureException e) { - e.printStackTrace(); - return false; - } - } - } - return true; - } - - private static boolean loadKeyStore(KeyStore ks, String path) { - FileInputStream fis = null; - try { - fis = new FileInputStream(new File(path)); // Trust store file path - } catch (FileNotFoundException e) { - e.printStackTrace(); - } - try { - ks.load(fis, null); - fis.close(); - return true; - } catch (NoSuchAlgorithmException | CertificateException | IOException e) { - e.printStackTrace(); - } - return false; - } - - private static String readFile(String path) { - try { - FileReader fr = new FileReader(path); - BufferedReader br = new BufferedReader(fr); - StringBuffer pem = new StringBuffer(); - while(true) { - String line = br.readLine(); - if (line == null) { - break; - } - if (pem.length() > 0) { - pem.append("\n"); - } - pem.append(line.trim()); - } - br.close(); - fr.close(); - return Base64.getEncoder().encodeToString(pem.toString().getBytes()); - } catch (IOException e) { - e.printStackTrace(); - } - return null; - } - - private static String getRootCertPEM(KeyStore ks, String trustStore, String tempStore) { - if (!loadKeyStore(ks, trustStore)) { - return null; - } - try { - Enumeration aliases = ks.aliases(); - while (aliases.hasMoreElements()) { - String alias = aliases.nextElement(); - System.out.println("Trust store alias:" + alias); - if (ks.isCertificateEntry(alias)) { - System.out.println("This is a certificate alias"); - } else { - continue; - } - Certificate cert = ks.getCertificate(alias); - if (cert != null) { - if (cert.getType().equals("X.509")) { - X509Certificate xcert = (X509Certificate) cert; - 
System.out.println(xcert.getIssuerX500Principal().getName()); - System.out.println(xcert.getSubjectX500Principal().getName()); - System.out.println(xcert.getCriticalExtensionOIDs()); - if (isSelfSignedCertificate(xcert)) { - System.out.println("This is a Self-signed certificate"); - } - else { - return null; - } - try { - File outputDir = new File(tempStore + "root/"); - if (!outputDir.exists()) { - outputDir.mkdirs(); - } - JcaPEMWriter xwriter = new JcaPEMWriter(new FileWriter(tempStore + "root/rootcert.pem")); - xwriter.writeObject(xcert); - xwriter.close(); - return readFile(tempStore + "root/rootcert.pem"); - } catch (IOException e) { - e.printStackTrace(); - break; - } - } - } - } - } catch (KeyStoreException e) { - e.printStackTrace(); - } - return null; - } - - // Return sequence: , , , , ,.... - private static Vector getCertChain(KeyStore ks, String nodeKeyStorePath, String tmpStore, String[] tmpCertfiles) { - Vector chainCerts = new Vector(); - if (!loadKeyStore(ks, nodeKeyStorePath)) { - return null; - } - try { - Enumeration aliases = ks.aliases(); - while (aliases.hasMoreElements()) { - String alias = aliases.nextElement(); - Certificate[] certs = ks.getCertificateChain(alias); - if (certs.length == 4) { - X509Certificate xcerts[] = new X509Certificate[certs.length]; - for (int i = 0 ; i < certs.length ; i++) { - if (!certs[i].getType().equals("X.509")) { - continue; - } - X509Certificate xcert = (X509Certificate) certs[i]; - xcerts[i] = xcert; - } - if (!isValidCertificateChain(xcerts)) { - continue; - } - int certExtractCount = 1; - if (chainCerts.size() == 0) { - certExtractCount = 4; - } - for (int i = 0 ; i < certExtractCount ; i++) { - try { - File outputDir = new File(tmpStore); - if (!outputDir.exists()) { - outputDir.mkdirs(); - } - String filePath = tmpStore + tmpCertfiles[i] + ".pem"; - JcaPEMWriter xwriter = new JcaPEMWriter(new FileWriter(filePath)); - xwriter.writeObject(xcerts[i]); - xwriter.close(); - if (i == 0) { - 
chainCerts.add(readFile(filePath)); - } else if (i == 1) { - String xcertOrg = null; - try { - LdapName identity = new LdapName(xcerts[0].getSubjectX500Principal().getName()); - for (Rdn rdn: identity.getRdns()) { - if (rdn.getType().equals("O")) { - xcertOrg = rdn.getValue().toString(); - } - } - } catch (InvalidNameException e) { - e.printStackTrace(); - return null; - } - if (xcertOrg == null) { - return null; - } - chainCerts.add(0, xcertOrg); - chainCerts.add(1, readFile(filePath)); - } else if (i == 2) { - chainCerts.add(1, readFile(filePath)); - } else if (i == 3) { - chainCerts.add(1, readFile(filePath)); - } - } catch (IOException e) { - e.printStackTrace(); - return null; - } - } - } - } - } catch (KeyStoreException e) { - e.printStackTrace(); - return null; - } - return chainCerts; - } - - private static JsonObject getNodeIdCertChain(KeyStore ks, JsonObject configObj, String nodeKeyStorePath, String tempStore) { - String tmpStore = tempStore + "node/"; - String[] tmpCertfiles = new String[] { "nodeIdentity", "nodeCA", "doormanCA", "rootCA" }; - Vector certs = getCertChain(ks, nodeKeyStorePath, tmpStore, tmpCertfiles); - if (certs == null || certs.size() <= 3) { - return null; - } - configObj.addProperty("name", certs.elementAt(0)); - JsonArray rootArr = new JsonArray(); - rootArr.add(certs.elementAt(1)); - configObj.add("root_certs", rootArr); - JsonArray doormanArr = new JsonArray(); - doormanArr.add(certs.elementAt(2)); - configObj.add("doorman_certs", doormanArr); - JsonArray nodeCA = new JsonArray(); - nodeCA.add(certs.elementAt(3)); - configObj.add("nodeca_certs", nodeCA); - JsonArray nodeIdCert = new JsonArray(); - nodeIdCert.add(certs.elementAt(4)); - configObj.add("nodeid_cert", nodeIdCert); - /*JsonArray idArr = new JsonArray(); - for (int i = 3 ; i < certs.size() ; i++) { - idArr.add(certs.elementAt(i)); - } - obj.add("admins", idArr);*/ - return configObj; - } - - private static JsonObject getNodeTlsCertChain(KeyStore ks, JsonObject configObj, 
String nodeKeyStorePath, String tempStore) { - String tmpStore = tempStore + "ssl/"; - String[] tmpCertfiles = new String[] { "nodeTlsCert", "nodeTlsCA", "doormanTlsCA", "rootTlsCA" }; - Vector certs = getCertChain(ks, nodeKeyStorePath, tmpStore, tmpCertfiles); - if (certs == null || certs.size() <= 3) { - return null; - } - JsonArray rootArr = new JsonArray(); - rootArr.add(certs.elementAt(1)); - configObj.add("tls_root_certs", rootArr); - JsonArray doormanArr = new JsonArray(); - doormanArr.add(certs.elementAt(2)); - doormanArr.add(certs.elementAt(3)); - configObj.add("tls_intermediate_certs", doormanArr); - return configObj; - } - - private static void deleteFolder(File folder) { - if (folder.isDirectory()) { - for (File subf: folder.listFiles()) { - deleteFolder(subf); - } - } - folder.delete(); - } - - public static String getConfig(String baseNodesPath, String[] nodes) { - KeyStore ks = null; - try { - ks = KeyStore.getInstance(KeyStore.getDefaultType()); - } catch (KeyStoreException e) { - e.printStackTrace(); - return null; - } - JsonObject configObj = new JsonObject(); - for (String node: nodes) { - String nodePath = baseNodesPath + node + "/certificates"; - String tempStore = nodePath + "/tmp/"; - JsonObject nodeConfigObj = new JsonObject(); - nodeConfigObj = getNodeIdCertChain(ks, nodeConfigObj, nodePath + "/nodekeystore.jks", tempStore); - if (configObj == null) { - System.out.println("Unable to extract node certificate chain"); - break; - } else { - //nodeConfigObj = getNodeTlsCertChain(ks, nodeConfigObj, nodePath + "/sslkeystore.jks", tempStore); - //if (nodeConfigObj == null) { - // System.out.println("Unable to extract TLS certificate chain"); - // break; - //} else { - // configObj.add(node, nodeConfigObj); - //} - configObj.add(node, nodeConfigObj); - } - deleteFolder(new File(tempStore)); - System.out.println("Extracted configuration for " + node); - } - System.out.println("Extracted configuration for all nodes"); - Gson gson = new 
GsonBuilder().setPrettyPrinting().disableHtmlEscaping().create(); - return gson.toJson(configObj); - } - - /*public static void main(String[] args) { - String baseNodesPath = "store/certs/"; - String config = getConfig(baseNodesPath); - if (config == null) { - System.out.println("Unable to get config"); - } else { - try { - FileWriter fw = new FileWriter(baseNodesPath + "config.json"); - fw.write(config); - fw.close(); - } catch (IOException e) { - e.printStackTrace(); - } - System.out.println("Written configuration JSON to file"); - } - }*/ - -} +/* + * Copyright IBM Corp. All Rights Reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.weaver.corda.sdk; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.security.InvalidKeyException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.SignatureException; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Base64; +import java.util.Enumeration; +import java.util.Vector; + +import javax.naming.InvalidNameException; +import javax.naming.ldap.LdapName; +import javax.naming.ldap.Rdn; + +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; +import com.google.gson.JsonObject; + +/** + * @author VENKATRAMANRAMAKRISH + * + */ +public class CredentialsExtractor { + + private static boolean isSelfSignedCertificate(X509Certificate xcert) { + if (xcert == null) throw new NullPointerException("No certificate provided"); + if 
(!xcert.getIssuerX500Principal().getName().equals(xcert.getSubjectX500Principal().getName())) { + return false; + } + try { + xcert.verify(xcert.getPublicKey()); + } catch (InvalidKeyException | CertificateException | NoSuchAlgorithmException | NoSuchProviderException + | SignatureException e) { + e.printStackTrace(); + return false; + } + return true; + } + + // Assume that the chain rises up in the hierarchy from low to high index + private static boolean isValidCertificateChain(X509Certificate xcerts[]) { + for (int i = 0 ; i< xcerts.length ; i++) { + if (i == xcerts.length - 1) { + return isSelfSignedCertificate(xcerts[i]); + } else { + if (!xcerts[i].getIssuerX500Principal().getName().equals(xcerts[i + 1].getSubjectX500Principal().getName())) { + return false; + } + try { + xcerts[i].verify(xcerts[i + 1].getPublicKey()); + } catch (InvalidKeyException | CertificateException | NoSuchAlgorithmException | NoSuchProviderException + | SignatureException e) { + e.printStackTrace(); + return false; + } + } + } + return true; + } + + private static boolean loadKeyStore(KeyStore ks, String path) { + FileInputStream fis = null; + try { + fis = new FileInputStream(new File(path)); // Trust store file path + } catch (FileNotFoundException e) { + e.printStackTrace(); + } + try { + ks.load(fis, null); + fis.close(); + return true; + } catch (NoSuchAlgorithmException | CertificateException | IOException e) { + e.printStackTrace(); + } + return false; + } + + private static String readFile(String path) { + try { + FileReader fr = new FileReader(path); + BufferedReader br = new BufferedReader(fr); + StringBuffer pem = new StringBuffer(); + while(true) { + String line = br.readLine(); + if (line == null) { + break; + } + if (pem.length() > 0) { + pem.append("\n"); + } + pem.append(line.trim()); + } + br.close(); + fr.close(); + return Base64.getEncoder().encodeToString(pem.toString().getBytes()); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + 
private static String getRootCertPEM(KeyStore ks, String trustStore, String tempStore) { + if (!loadKeyStore(ks, trustStore)) { + return null; + } + try { + Enumeration aliases = ks.aliases(); + while (aliases.hasMoreElements()) { + String alias = aliases.nextElement(); + System.out.println("Trust store alias:" + alias); + if (ks.isCertificateEntry(alias)) { + System.out.println("This is a certificate alias"); + } else { + continue; + } + Certificate cert = ks.getCertificate(alias); + if (cert != null) { + if (cert.getType().equals("X.509")) { + X509Certificate xcert = (X509Certificate) cert; + System.out.println(xcert.getIssuerX500Principal().getName()); + System.out.println(xcert.getSubjectX500Principal().getName()); + System.out.println(xcert.getCriticalExtensionOIDs()); + if (isSelfSignedCertificate(xcert)) { + System.out.println("This is a Self-signed certificate"); + } + else { + return null; + } + try { + File outputDir = new File(tempStore + "root/"); + if (!outputDir.exists()) { + outputDir.mkdirs(); + } + JcaPEMWriter xwriter = new JcaPEMWriter(new FileWriter(tempStore + "root/rootcert.pem")); + xwriter.writeObject(xcert); + xwriter.close(); + return readFile(tempStore + "root/rootcert.pem"); + } catch (IOException e) { + e.printStackTrace(); + break; + } + } + } + } + } catch (KeyStoreException e) { + e.printStackTrace(); + } + return null; + } + + // Return sequence: , , , , ,.... 
+ private static Vector getCertChain(KeyStore ks, String nodeKeyStorePath, String tmpStore, String[] tmpCertfiles) { + Vector chainCerts = new Vector(); + if (!loadKeyStore(ks, nodeKeyStorePath)) { + return null; + } + try { + Enumeration aliases = ks.aliases(); + while (aliases.hasMoreElements()) { + String alias = aliases.nextElement(); + Certificate[] certs = ks.getCertificateChain(alias); + if (certs.length == 4) { + X509Certificate xcerts[] = new X509Certificate[certs.length]; + for (int i = 0 ; i < certs.length ; i++) { + if (!certs[i].getType().equals("X.509")) { + continue; + } + X509Certificate xcert = (X509Certificate) certs[i]; + xcerts[i] = xcert; + } + if (!isValidCertificateChain(xcerts)) { + continue; + } + int certExtractCount = 1; + if (chainCerts.size() == 0) { + certExtractCount = 4; + } + for (int i = 0 ; i < certExtractCount ; i++) { + try { + File outputDir = new File(tmpStore); + if (!outputDir.exists()) { + outputDir.mkdirs(); + } + String filePath = tmpStore + tmpCertfiles[i] + ".pem"; + JcaPEMWriter xwriter = new JcaPEMWriter(new FileWriter(filePath)); + xwriter.writeObject(xcerts[i]); + xwriter.close(); + if (i == 0) { + chainCerts.add(readFile(filePath)); + } else if (i == 1) { + String xcertOrg = null; + try { + LdapName identity = new LdapName(xcerts[0].getSubjectX500Principal().getName()); + for (Rdn rdn: identity.getRdns()) { + if (rdn.getType().equals("O")) { + xcertOrg = rdn.getValue().toString(); + } + } + } catch (InvalidNameException e) { + e.printStackTrace(); + return null; + } + if (xcertOrg == null) { + return null; + } + chainCerts.add(0, xcertOrg); + chainCerts.add(1, readFile(filePath)); + } else if (i == 2) { + chainCerts.add(1, readFile(filePath)); + } else if (i == 3) { + chainCerts.add(1, readFile(filePath)); + } + } catch (IOException e) { + e.printStackTrace(); + return null; + } + } + } + } + } catch (KeyStoreException e) { + e.printStackTrace(); + return null; + } + return chainCerts; + } + + private static 
JsonObject getNodeIdCertChain(KeyStore ks, JsonObject configObj, String nodeKeyStorePath, String tempStore) { + String tmpStore = tempStore + "node/"; + String[] tmpCertfiles = new String[] { "nodeIdentity", "nodeCA", "doormanCA", "rootCA" }; + Vector certs = getCertChain(ks, nodeKeyStorePath, tmpStore, tmpCertfiles); + if (certs == null || certs.size() <= 3) { + return null; + } + configObj.addProperty("name", certs.elementAt(0)); + JsonArray rootArr = new JsonArray(); + rootArr.add(certs.elementAt(1)); + configObj.add("root_certs", rootArr); + JsonArray doormanArr = new JsonArray(); + doormanArr.add(certs.elementAt(2)); + configObj.add("doorman_certs", doormanArr); + JsonArray nodeCA = new JsonArray(); + nodeCA.add(certs.elementAt(3)); + configObj.add("nodeca_certs", nodeCA); + JsonArray nodeIdCert = new JsonArray(); + nodeIdCert.add(certs.elementAt(4)); + configObj.add("nodeid_cert", nodeIdCert); + /*JsonArray idArr = new JsonArray(); + for (int i = 3 ; i < certs.size() ; i++) { + idArr.add(certs.elementAt(i)); + } + obj.add("admins", idArr);*/ + return configObj; + } + + private static JsonObject getNodeTlsCertChain(KeyStore ks, JsonObject configObj, String nodeKeyStorePath, String tempStore) { + String tmpStore = tempStore + "ssl/"; + String[] tmpCertfiles = new String[] { "nodeTlsCert", "nodeTlsCA", "doormanTlsCA", "rootTlsCA" }; + Vector certs = getCertChain(ks, nodeKeyStorePath, tmpStore, tmpCertfiles); + if (certs == null || certs.size() <= 3) { + return null; + } + JsonArray rootArr = new JsonArray(); + rootArr.add(certs.elementAt(1)); + configObj.add("tls_root_certs", rootArr); + JsonArray doormanArr = new JsonArray(); + doormanArr.add(certs.elementAt(2)); + doormanArr.add(certs.elementAt(3)); + configObj.add("tls_intermediate_certs", doormanArr); + return configObj; + } + + private static void deleteFolder(File folder) { + if (folder.isDirectory()) { + for (File subf: folder.listFiles()) { + deleteFolder(subf); + } + } + folder.delete(); + } + + public 
static String getConfig(String baseNodesPath, String[] nodes) { + KeyStore ks = null; + try { + ks = KeyStore.getInstance(KeyStore.getDefaultType()); + } catch (KeyStoreException e) { + e.printStackTrace(); + return null; + } + JsonObject configObj = new JsonObject(); + for (String node: nodes) { + String nodePath = baseNodesPath + node + "/certificates"; + String tempStore = nodePath + "/tmp/"; + JsonObject nodeConfigObj = new JsonObject(); + nodeConfigObj = getNodeIdCertChain(ks, nodeConfigObj, nodePath + "/nodekeystore.jks", tempStore); + if (configObj == null) { + System.out.println("Unable to extract node certificate chain"); + break; + } else { + //nodeConfigObj = getNodeTlsCertChain(ks, nodeConfigObj, nodePath + "/sslkeystore.jks", tempStore); + //if (nodeConfigObj == null) { + // System.out.println("Unable to extract TLS certificate chain"); + // break; + //} else { + // configObj.add(node, nodeConfigObj); + //} + configObj.add(node, nodeConfigObj); + } + deleteFolder(new File(tempStore)); + System.out.println("Extracted configuration for " + node); + } + System.out.println("Extracted configuration for all nodes"); + Gson gson = new GsonBuilder().setPrettyPrinting().disableHtmlEscaping().create(); + return gson.toJson(configObj); + } + + /*public static void main(String[] args) { + String baseNodesPath = "store/certs/"; + String config = getConfig(baseNodesPath); + if (config == null) { + System.out.println("Unable to get config"); + } else { + try { + FileWriter fw = new FileWriter(baseNodesPath + "config.json"); + fw.write(config); + fw.close(); + } catch (IOException e) { + e.printStackTrace(); + } + System.out.println("Written configuration JSON to file"); + } + }*/ + +} diff --git a/weaver/tests/network-setups/corda/shared/Corda_Network/tmp/jar/MANIFEST.MF b/weaver/tests/network-setups/corda/shared/Corda_Network/tmp/jar/MANIFEST.MF index 58630c02ef..59499bce4a 100644 --- a/weaver/tests/network-setups/corda/shared/Corda_Network/tmp/jar/MANIFEST.MF +++ 
b/weaver/tests/network-setups/corda/shared/Corda_Network/tmp/jar/MANIFEST.MF @@ -1,2 +1,2 @@ -Manifest-Version: 1.0 - +Manifest-Version: 1.0 + diff --git a/weaver/tests/network-setups/corda/shared/Corda_Network2/tmp/jar/MANIFEST.MF b/weaver/tests/network-setups/corda/shared/Corda_Network2/tmp/jar/MANIFEST.MF index 58630c02ef..59499bce4a 100644 --- a/weaver/tests/network-setups/corda/shared/Corda_Network2/tmp/jar/MANIFEST.MF +++ b/weaver/tests/network-setups/corda/shared/Corda_Network2/tmp/jar/MANIFEST.MF @@ -1,2 +1,2 @@ -Manifest-Version: 1.0 - +Manifest-Version: 1.0 +