From ba1a97666cc3a95c3ab257e59070e8db7ea703ba Mon Sep 17 00:00:00 2001
From: Qiaoqiao Zhang <55688292+qiaozha@users.noreply.github.com>
Date: Tue, 16 Jun 2020 10:07:55 +0800
Subject: [PATCH] Release for mgmt datafactory (#9437)
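
A minimal sketch of constructing the updated client (the subscription id below
is a placeholder; @azure/ms-rest-nodeauth is assumed for sign-in, following the
usual README pattern for this package):

    import { DataFactoryManagementClient } from "@azure/arm-datafactory";
    import * as msRestNodeAuth from "@azure/ms-rest-nodeauth";

    msRestNodeAuth.interactiveLogin().then((creds) => {
      const client = new DataFactoryManagementClient(creds, "<subscription-id>");
    });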
---
sdk/datafactory/arm-datafactory/package.json | 2 +-
.../src/dataFactoryManagementClientContext.ts | 2 +-
.../src/models/dataFlowDebugSessionMappers.ts | 7 +
.../src/models/dataFlowsMappers.ts | 23 +
.../src/models/datasetsMappers.ts | 23 +
.../src/models/factoriesMappers.ts | 23 +
.../arm-datafactory/src/models/index.ts | 3895 +++++++++-----
.../src/models/integrationRuntimesMappers.ts | 23 +
.../src/models/linkedServicesMappers.ts | 23 +
.../arm-datafactory/src/models/mappers.ts | 4664 +++++++++++------
.../src/models/pipelinesMappers.ts | 23 +
.../src/models/triggersMappers.ts | 23 +
12 files changed, 5547 insertions(+), 3184 deletions(-)
diff --git a/sdk/datafactory/arm-datafactory/package.json b/sdk/datafactory/arm-datafactory/package.json
index b07e7a940cbb..c6772adaa277 100644
--- a/sdk/datafactory/arm-datafactory/package.json
+++ b/sdk/datafactory/arm-datafactory/package.json
@@ -2,7 +2,7 @@
"name": "@azure/arm-datafactory",
"author": "Microsoft Corporation",
"description": "DataFactoryManagementClient Library with typescript type definitions for node.js and browser.",
- "version": "7.0.0",
+ "version": "7.1.0",
"dependencies": {
"@azure/ms-rest-azure-js": "^2.0.1",
"@azure/ms-rest-js": "^2.0.4",
diff --git a/sdk/datafactory/arm-datafactory/src/dataFactoryManagementClientContext.ts b/sdk/datafactory/arm-datafactory/src/dataFactoryManagementClientContext.ts
index 48bd8b27ddac..9379c3c3572c 100644
--- a/sdk/datafactory/arm-datafactory/src/dataFactoryManagementClientContext.ts
+++ b/sdk/datafactory/arm-datafactory/src/dataFactoryManagementClientContext.ts
@@ -13,7 +13,7 @@ import * as msRest from "@azure/ms-rest-js";
import * as msRestAzure from "@azure/ms-rest-azure-js";
const packageName = "@azure/arm-datafactory";
-const packageVersion = "7.0.0";
+const packageVersion = "7.1.0";
export class DataFactoryManagementClientContext extends msRestAzure.AzureServiceClient {
credentials: msRest.ServiceClientCredentials;
diff --git a/sdk/datafactory/arm-datafactory/src/models/dataFlowDebugSessionMappers.ts b/sdk/datafactory/arm-datafactory/src/models/dataFlowDebugSessionMappers.ts
index 0d470f86b98c..b9ccb6592dde 100644
--- a/sdk/datafactory/arm-datafactory/src/models/dataFlowDebugSessionMappers.ts
+++ b/sdk/datafactory/arm-datafactory/src/models/dataFlowDebugSessionMappers.ts
@@ -120,6 +120,7 @@ export {
EloquaObjectDataset,
EntityReference,
EnvironmentVariableSetup,
+ ExcelDataset,
FileServerLinkedService,
FileServerLocation,
FileShareDataset,
@@ -200,6 +201,7 @@ export {
OracleTableDataset,
OrcDataset,
OrcFormat,
+ PackageStore,
ParameterSpecification,
ParquetDataset,
ParquetFormat,
@@ -245,8 +247,12 @@ export {
ServiceNowObjectDataset,
SftpLocation,
SftpServerLinkedService,
+ SharePointOnlineListLinkedService,
+ SharePointOnlineListResourceDataset,
ShopifyLinkedService,
ShopifyObjectDataset,
+ SnowflakeDataset,
+ SnowflakeLinkedService,
SparkLinkedService,
SparkObjectDataset,
SqlServerLinkedService,
@@ -270,6 +276,7 @@ export {
WebTableDataset,
XeroLinkedService,
XeroObjectDataset,
+ XmlDataset,
ZohoLinkedService,
ZohoObjectDataset
} from "../models/mappers";
diff --git a/sdk/datafactory/arm-datafactory/src/models/dataFlowsMappers.ts b/sdk/datafactory/arm-datafactory/src/models/dataFlowsMappers.ts
index 9d3716d531f6..c3dfbe1cfa58 100644
--- a/sdk/datafactory/arm-datafactory/src/models/dataFlowsMappers.ts
+++ b/sdk/datafactory/arm-datafactory/src/models/dataFlowsMappers.ts
@@ -98,6 +98,7 @@ export {
AzureTableStorageLinkedService,
BaseResource,
BinaryDataset,
+ BinaryReadSettings,
BinarySink,
BinarySource,
BlobEventsTrigger,
@@ -115,6 +116,7 @@ export {
CommonDataServiceForAppsSink,
CommonDataServiceForAppsSource,
ComponentSetup,
+ CompressionReadSettings,
ConcurLinkedService,
ConcurObjectDataset,
ConcurSource,
@@ -196,11 +198,14 @@ export {
EloquaSource,
EntityReference,
EnvironmentVariableSetup,
+ ExcelDataset,
+ ExcelSource,
ExecuteDataFlowActivity,
ExecuteDataFlowActivityTypePropertiesCompute,
ExecutePipelineActivity,
ExecuteSSISPackageActivity,
ExecutionActivity,
+ ExportSettings,
Expression,
Factory,
FactoryGitHubConfiguration,
@@ -222,6 +227,7 @@ export {
FtpServerLinkedService,
FtpServerLocation,
GetMetadataActivity,
+ GlobalParameterSpecification,
GoogleAdWordsLinkedService,
GoogleAdWordsObjectDataset,
GoogleAdWordsSource,
@@ -263,6 +269,7 @@ export {
ImpalaLinkedService,
ImpalaObjectDataset,
ImpalaSource,
+ ImportSettings,
InformixLinkedService,
InformixSink,
InformixSource,
@@ -282,6 +289,7 @@ export {
JiraSource,
JsonDataset,
JsonFormat,
+ JsonReadSettings,
JsonSink,
JsonSource,
JsonWriteSettings,
@@ -345,6 +353,7 @@ export {
OrcFormat,
OrcSink,
OrcSource,
+ PackageStore,
ParameterSpecification,
ParquetDataset,
ParquetFormat,
@@ -431,10 +440,19 @@ export {
SftpReadSettings,
SftpServerLinkedService,
SftpWriteSettings,
+ SharePointOnlineListLinkedService,
+ SharePointOnlineListResourceDataset,
+ SharePointOnlineListSource,
ShopifyLinkedService,
ShopifyObjectDataset,
ShopifySource,
SkipErrorFile,
+ SnowflakeDataset,
+ SnowflakeExportCopyCommand,
+ SnowflakeImportCopyCommand,
+ SnowflakeLinkedService,
+ SnowflakeSink,
+ SnowflakeSource,
SparkLinkedService,
SparkObjectDataset,
SparkSource,
@@ -442,6 +460,7 @@ export {
SqlDWSource,
SqlMISink,
SqlMISource,
+ SqlPartitionSettings,
SqlServerLinkedService,
SqlServerSink,
SqlServerSource,
@@ -504,6 +523,10 @@ export {
XeroLinkedService,
XeroObjectDataset,
XeroSource,
+ XmlDataset,
+ XmlReadSettings,
+ XmlSource,
+ ZipDeflateReadSettings,
ZohoLinkedService,
ZohoObjectDataset,
ZohoSource
diff --git a/sdk/datafactory/arm-datafactory/src/models/datasetsMappers.ts b/sdk/datafactory/arm-datafactory/src/models/datasetsMappers.ts
index 4fd2bf0b1967..dd56da18e57b 100644
--- a/sdk/datafactory/arm-datafactory/src/models/datasetsMappers.ts
+++ b/sdk/datafactory/arm-datafactory/src/models/datasetsMappers.ts
@@ -98,6 +98,7 @@ export {
AzureTableStorageLinkedService,
BaseResource,
BinaryDataset,
+ BinaryReadSettings,
BinarySink,
BinarySource,
BlobEventsTrigger,
@@ -115,6 +116,7 @@ export {
CommonDataServiceForAppsSink,
CommonDataServiceForAppsSource,
ComponentSetup,
+ CompressionReadSettings,
ConcurLinkedService,
ConcurObjectDataset,
ConcurSource,
@@ -196,11 +198,14 @@ export {
EloquaSource,
EntityReference,
EnvironmentVariableSetup,
+ ExcelDataset,
+ ExcelSource,
ExecuteDataFlowActivity,
ExecuteDataFlowActivityTypePropertiesCompute,
ExecutePipelineActivity,
ExecuteSSISPackageActivity,
ExecutionActivity,
+ ExportSettings,
Expression,
Factory,
FactoryGitHubConfiguration,
@@ -222,6 +227,7 @@ export {
FtpServerLinkedService,
FtpServerLocation,
GetMetadataActivity,
+ GlobalParameterSpecification,
GoogleAdWordsLinkedService,
GoogleAdWordsObjectDataset,
GoogleAdWordsSource,
@@ -263,6 +269,7 @@ export {
ImpalaLinkedService,
ImpalaObjectDataset,
ImpalaSource,
+ ImportSettings,
InformixLinkedService,
InformixSink,
InformixSource,
@@ -282,6 +289,7 @@ export {
JiraSource,
JsonDataset,
JsonFormat,
+ JsonReadSettings,
JsonSink,
JsonSource,
JsonWriteSettings,
@@ -345,6 +353,7 @@ export {
OrcFormat,
OrcSink,
OrcSource,
+ PackageStore,
ParameterSpecification,
ParquetDataset,
ParquetFormat,
@@ -431,10 +440,19 @@ export {
SftpReadSettings,
SftpServerLinkedService,
SftpWriteSettings,
+ SharePointOnlineListLinkedService,
+ SharePointOnlineListResourceDataset,
+ SharePointOnlineListSource,
ShopifyLinkedService,
ShopifyObjectDataset,
ShopifySource,
SkipErrorFile,
+ SnowflakeDataset,
+ SnowflakeExportCopyCommand,
+ SnowflakeImportCopyCommand,
+ SnowflakeLinkedService,
+ SnowflakeSink,
+ SnowflakeSource,
SparkLinkedService,
SparkObjectDataset,
SparkSource,
@@ -442,6 +460,7 @@ export {
SqlDWSource,
SqlMISink,
SqlMISource,
+ SqlPartitionSettings,
SqlServerLinkedService,
SqlServerSink,
SqlServerSource,
@@ -504,6 +523,10 @@ export {
XeroLinkedService,
XeroObjectDataset,
XeroSource,
+ XmlDataset,
+ XmlReadSettings,
+ XmlSource,
+ ZipDeflateReadSettings,
ZohoLinkedService,
ZohoObjectDataset,
ZohoSource
diff --git a/sdk/datafactory/arm-datafactory/src/models/factoriesMappers.ts b/sdk/datafactory/arm-datafactory/src/models/factoriesMappers.ts
index 1e2234e64d0a..2b005e64716a 100644
--- a/sdk/datafactory/arm-datafactory/src/models/factoriesMappers.ts
+++ b/sdk/datafactory/arm-datafactory/src/models/factoriesMappers.ts
@@ -99,6 +99,7 @@ export {
AzureTableStorageLinkedService,
BaseResource,
BinaryDataset,
+ BinaryReadSettings,
BinarySink,
BinarySource,
BlobEventsTrigger,
@@ -116,6 +117,7 @@ export {
CommonDataServiceForAppsSink,
CommonDataServiceForAppsSource,
ComponentSetup,
+ CompressionReadSettings,
ConcurLinkedService,
ConcurObjectDataset,
ConcurSource,
@@ -196,11 +198,14 @@ export {
EloquaSource,
EntityReference,
EnvironmentVariableSetup,
+ ExcelDataset,
+ ExcelSource,
ExecuteDataFlowActivity,
ExecuteDataFlowActivityTypePropertiesCompute,
ExecutePipelineActivity,
ExecuteSSISPackageActivity,
ExecutionActivity,
+ ExportSettings,
Expression,
Factory,
FactoryGitHubConfiguration,
@@ -227,6 +232,7 @@ export {
GetMetadataActivity,
GitHubAccessTokenRequest,
GitHubAccessTokenResponse,
+ GlobalParameterSpecification,
GoogleAdWordsLinkedService,
GoogleAdWordsObjectDataset,
GoogleAdWordsSource,
@@ -268,6 +274,7 @@ export {
ImpalaLinkedService,
ImpalaObjectDataset,
ImpalaSource,
+ ImportSettings,
InformixLinkedService,
InformixSink,
InformixSource,
@@ -287,6 +294,7 @@ export {
JiraSource,
JsonDataset,
JsonFormat,
+ JsonReadSettings,
JsonSink,
JsonSource,
JsonWriteSettings,
@@ -350,6 +358,7 @@ export {
OrcFormat,
OrcSink,
OrcSource,
+ PackageStore,
ParameterSpecification,
ParquetDataset,
ParquetFormat,
@@ -436,10 +445,19 @@ export {
SftpReadSettings,
SftpServerLinkedService,
SftpWriteSettings,
+ SharePointOnlineListLinkedService,
+ SharePointOnlineListResourceDataset,
+ SharePointOnlineListSource,
ShopifyLinkedService,
ShopifyObjectDataset,
ShopifySource,
SkipErrorFile,
+ SnowflakeDataset,
+ SnowflakeExportCopyCommand,
+ SnowflakeImportCopyCommand,
+ SnowflakeLinkedService,
+ SnowflakeSink,
+ SnowflakeSource,
SparkLinkedService,
SparkObjectDataset,
SparkSource,
@@ -447,6 +465,7 @@ export {
SqlDWSource,
SqlMISink,
SqlMISource,
+ SqlPartitionSettings,
SqlServerLinkedService,
SqlServerSink,
SqlServerSource,
@@ -510,6 +529,10 @@ export {
XeroLinkedService,
XeroObjectDataset,
XeroSource,
+ XmlDataset,
+ XmlReadSettings,
+ XmlSource,
+ ZipDeflateReadSettings,
ZohoLinkedService,
ZohoObjectDataset,
ZohoSource
diff --git a/sdk/datafactory/arm-datafactory/src/models/index.ts b/sdk/datafactory/arm-datafactory/src/models/index.ts
index 3cb4579316c5..6da41a2fbc6a 100644
--- a/sdk/datafactory/arm-datafactory/src/models/index.ts
+++ b/sdk/datafactory/arm-datafactory/src/models/index.ts
@@ -210,6 +210,21 @@ export interface FactoryRepoConfiguration {
lastCommitId?: string;
}
+/**
+ * Definition of a single parameter for an entity.
+ */
+export interface GlobalParameterSpecification {
+ /**
+ * Global Parameter type. Possible values include: 'Object', 'String', 'Int', 'Float', 'Bool',
+ * 'Array'
+ */
+ type: GlobalParameterType;
+ /**
+ * Value of parameter.
+ */
+ value: any;
+}
+
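// Editor's illustrative sketch (not part of the patch): a globalParameters map
// conforming to GlobalParameterSpecification, as it would appear on a Factory
// payload. The parameter names here are hypothetical.
const exampleGlobalParameters: { [name: string]: GlobalParameterSpecification } = {
  environment: { type: "String", value: "prod" },
  maxRetries: { type: "Int", value: 3 }
};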
/**
* Factory resource type.
*/
@@ -237,6 +252,10 @@ export interface Factory extends Resource {
* Git repo information of the factory.
*/
repoConfiguration?: FactoryRepoConfigurationUnion;
+ /**
+ * List of parameters for factory.
+ */
+ globalParameters?: { [propertyName: string]: GlobalParameterSpecification };
/**
* Describes unknown properties. The value of an unknown property can be of "any" type.
*/
@@ -436,7 +455,7 @@ export interface ParameterSpecification {
/**
* Contains the possible cases for LinkedService.
*/
-export type LinkedServiceUnion = LinkedService | AzureFunctionLinkedService | AzureDataExplorerLinkedService | SapTableLinkedService | GoogleAdWordsLinkedService | OracleServiceCloudLinkedService | DynamicsAXLinkedService | ResponsysLinkedService | AzureDatabricksLinkedService | AzureDataLakeAnalyticsLinkedService | HDInsightOnDemandLinkedService | SalesforceMarketingCloudLinkedService | NetezzaLinkedService | VerticaLinkedService | ZohoLinkedService | XeroLinkedService | SquareLinkedService | SparkLinkedService | ShopifyLinkedService | ServiceNowLinkedService | QuickBooksLinkedService | PrestoLinkedService | PhoenixLinkedService | PaypalLinkedService | MarketoLinkedService | AzureMariaDBLinkedService | MariaDBLinkedService | MagentoLinkedService | JiraLinkedService | ImpalaLinkedService | HubspotLinkedService | HiveLinkedService | HBaseLinkedService | GreenplumLinkedService | GoogleBigQueryLinkedService | EloquaLinkedService | DrillLinkedService | CouchbaseLinkedService | ConcurLinkedService | AzurePostgreSqlLinkedService | AmazonMWSLinkedService | SapHanaLinkedService | SapBWLinkedService | SftpServerLinkedService | FtpServerLinkedService | HttpLinkedService | AzureSearchLinkedService | CustomDataSourceLinkedService | AmazonRedshiftLinkedService | AmazonS3LinkedService | RestServiceLinkedService | SapOpenHubLinkedService | SapEccLinkedService | SapCloudForCustomerLinkedService | SalesforceServiceCloudLinkedService | SalesforceLinkedService | Office365LinkedService | AzureBlobFSLinkedService | AzureDataLakeStoreLinkedService | CosmosDbMongoDbApiLinkedService | MongoDbV2LinkedService | MongoDbLinkedService | CassandraLinkedService | WebLinkedService | ODataLinkedService | HdfsLinkedService | MicrosoftAccessLinkedService | InformixLinkedService | OdbcLinkedService | AzureMLServiceLinkedService | AzureMLLinkedService | TeradataLinkedService | Db2LinkedService | SybaseLinkedService | PostgreSqlLinkedService | MySqlLinkedService | AzureMySqlLinkedService | OracleLinkedService | GoogleCloudStorageLinkedService | AzureFileStorageLinkedService | FileServerLinkedService | HDInsightLinkedService | CommonDataServiceForAppsLinkedService | DynamicsCrmLinkedService | DynamicsLinkedService | CosmosDbLinkedService | AzureKeyVaultLinkedService | AzureBatchLinkedService | AzureSqlMILinkedService | AzureSqlDatabaseLinkedService | SqlServerLinkedService | AzureSqlDWLinkedService | AzureTableStorageLinkedService | AzureBlobStorageLinkedService | AzureStorageLinkedService;
+export type LinkedServiceUnion = LinkedService | SharePointOnlineListLinkedService | SnowflakeLinkedService | AzureFunctionLinkedService | AzureDataExplorerLinkedService | SapTableLinkedService | GoogleAdWordsLinkedService | OracleServiceCloudLinkedService | DynamicsAXLinkedService | ResponsysLinkedService | AzureDatabricksLinkedService | AzureDataLakeAnalyticsLinkedService | HDInsightOnDemandLinkedService | SalesforceMarketingCloudLinkedService | NetezzaLinkedService | VerticaLinkedService | ZohoLinkedService | XeroLinkedService | SquareLinkedService | SparkLinkedService | ShopifyLinkedService | ServiceNowLinkedService | QuickBooksLinkedService | PrestoLinkedService | PhoenixLinkedService | PaypalLinkedService | MarketoLinkedService | AzureMariaDBLinkedService | MariaDBLinkedService | MagentoLinkedService | JiraLinkedService | ImpalaLinkedService | HubspotLinkedService | HiveLinkedService | HBaseLinkedService | GreenplumLinkedService | GoogleBigQueryLinkedService | EloquaLinkedService | DrillLinkedService | CouchbaseLinkedService | ConcurLinkedService | AzurePostgreSqlLinkedService | AmazonMWSLinkedService | SapHanaLinkedService | SapBWLinkedService | SftpServerLinkedService | FtpServerLinkedService | HttpLinkedService | AzureSearchLinkedService | CustomDataSourceLinkedService | AmazonRedshiftLinkedService | AmazonS3LinkedService | RestServiceLinkedService | SapOpenHubLinkedService | SapEccLinkedService | SapCloudForCustomerLinkedService | SalesforceServiceCloudLinkedService | SalesforceLinkedService | Office365LinkedService | AzureBlobFSLinkedService | AzureDataLakeStoreLinkedService | CosmosDbMongoDbApiLinkedService | MongoDbV2LinkedService | MongoDbLinkedService | CassandraLinkedService | WebLinkedService | ODataLinkedService | HdfsLinkedService | MicrosoftAccessLinkedService | InformixLinkedService | OdbcLinkedService | AzureMLServiceLinkedService | AzureMLLinkedService | TeradataLinkedService | Db2LinkedService | SybaseLinkedService | PostgreSqlLinkedService | MySqlLinkedService | AzureMySqlLinkedService | OracleLinkedService | GoogleCloudStorageLinkedService | AzureFileStorageLinkedService | FileServerLinkedService | HDInsightLinkedService | CommonDataServiceForAppsLinkedService | DynamicsCrmLinkedService | DynamicsLinkedService | CosmosDbLinkedService | AzureKeyVaultLinkedService | AzureBatchLinkedService | AzureSqlMILinkedService | AzureSqlDatabaseLinkedService | SqlServerLinkedService | AzureSqlDWLinkedService | AzureTableStorageLinkedService | AzureBlobStorageLinkedService | AzureStorageLinkedService;
/**
* The Azure Data Factory nested object which contains the information and credential which can be
@@ -492,7 +511,7 @@ export interface DatasetFolder {
/**
* Contains the possible cases for Dataset.
*/
-export type DatasetUnion = Dataset | GoogleAdWordsObjectDataset | AzureDataExplorerTableDataset | OracleServiceCloudObjectDataset | DynamicsAXResourceDataset | ResponsysObjectDataset | SalesforceMarketingCloudObjectDataset | VerticaTableDataset | NetezzaTableDataset | ZohoObjectDataset | XeroObjectDataset | SquareObjectDataset | SparkObjectDataset | ShopifyObjectDataset | ServiceNowObjectDataset | QuickBooksObjectDataset | PrestoObjectDataset | PhoenixObjectDataset | PaypalObjectDataset | MarketoObjectDataset | AzureMariaDBTableDataset | MariaDBTableDataset | MagentoObjectDataset | JiraObjectDataset | ImpalaObjectDataset | HubspotObjectDataset | HiveObjectDataset | HBaseObjectDataset | GreenplumTableDataset | GoogleBigQueryObjectDataset | EloquaObjectDataset | DrillTableDataset | CouchbaseTableDataset | ConcurObjectDataset | AzurePostgreSqlTableDataset | AmazonMWSObjectDataset | HttpDataset | AzureSearchIndexDataset | WebTableDataset | SapTableResourceDataset | RestResourceDataset | SqlServerTableDataset | SapOpenHubTableDataset | SapHanaTableDataset | SapEccResourceDataset | SapCloudForCustomerResourceDataset | SapBwCubeDataset | SybaseTableDataset | SalesforceServiceCloudObjectDataset | SalesforceObjectDataset | MicrosoftAccessTableDataset | PostgreSqlTableDataset | MySqlTableDataset | OdbcTableDataset | InformixTableDataset | RelationalTableDataset | Db2TableDataset | AmazonRedshiftTableDataset | AzureMySqlTableDataset | TeradataTableDataset | OracleTableDataset | ODataResourceDataset | CosmosDbMongoDbApiCollectionDataset | MongoDbV2CollectionDataset | MongoDbCollectionDataset | FileShareDataset | Office365Dataset | AzureBlobFSDataset | AzureDataLakeStoreDataset | CommonDataServiceForAppsEntityDataset | DynamicsCrmEntityDataset | DynamicsEntityDataset | DocumentDbCollectionDataset | CosmosDbSqlApiCollectionDataset | CustomDataset | CassandraTableDataset | AzureSqlDWTableDataset | AzureSqlMITableDataset | AzureSqlTableDataset | AzureTableDataset | AzureBlobDataset | BinaryDataset | OrcDataset | JsonDataset | DelimitedTextDataset | ParquetDataset | AvroDataset | AmazonS3Dataset;
+export type DatasetUnion = Dataset | SharePointOnlineListResourceDataset | SnowflakeDataset | GoogleAdWordsObjectDataset | AzureDataExplorerTableDataset | OracleServiceCloudObjectDataset | DynamicsAXResourceDataset | ResponsysObjectDataset | SalesforceMarketingCloudObjectDataset | VerticaTableDataset | NetezzaTableDataset | ZohoObjectDataset | XeroObjectDataset | SquareObjectDataset | SparkObjectDataset | ShopifyObjectDataset | ServiceNowObjectDataset | QuickBooksObjectDataset | PrestoObjectDataset | PhoenixObjectDataset | PaypalObjectDataset | MarketoObjectDataset | AzureMariaDBTableDataset | MariaDBTableDataset | MagentoObjectDataset | JiraObjectDataset | ImpalaObjectDataset | HubspotObjectDataset | HiveObjectDataset | HBaseObjectDataset | GreenplumTableDataset | GoogleBigQueryObjectDataset | EloquaObjectDataset | DrillTableDataset | CouchbaseTableDataset | ConcurObjectDataset | AzurePostgreSqlTableDataset | AmazonMWSObjectDataset | HttpDataset | AzureSearchIndexDataset | WebTableDataset | SapTableResourceDataset | RestResourceDataset | SqlServerTableDataset | SapOpenHubTableDataset | SapHanaTableDataset | SapEccResourceDataset | SapCloudForCustomerResourceDataset | SapBwCubeDataset | SybaseTableDataset | SalesforceServiceCloudObjectDataset | SalesforceObjectDataset | MicrosoftAccessTableDataset | PostgreSqlTableDataset | MySqlTableDataset | OdbcTableDataset | InformixTableDataset | RelationalTableDataset | Db2TableDataset | AmazonRedshiftTableDataset | AzureMySqlTableDataset | TeradataTableDataset | OracleTableDataset | ODataResourceDataset | CosmosDbMongoDbApiCollectionDataset | MongoDbV2CollectionDataset | MongoDbCollectionDataset | FileShareDataset | Office365Dataset | AzureBlobFSDataset | AzureDataLakeStoreDataset | CommonDataServiceForAppsEntityDataset | DynamicsCrmEntityDataset | DynamicsEntityDataset | DocumentDbCollectionDataset | CosmosDbSqlApiCollectionDataset | CustomDataset | CassandraTableDataset | AzureSqlDWTableDataset | AzureSqlMITableDataset | AzureSqlTableDataset | AzureTableDataset | AzureBlobDataset | BinaryDataset | OrcDataset | XmlDataset | JsonDataset | DelimitedTextDataset | ParquetDataset | ExcelDataset | AvroDataset | AmazonS3Dataset;
/**
* The Azure Data Factory nested object which identifies data within different data stores, such as
@@ -1935,6 +1954,14 @@ export interface DataFlowSink extends Transformation {
* Dataset reference.
*/
dataset?: DatasetReference;
+ /**
+ * Linked service reference.
+ */
+ linkedService?: LinkedServiceReference;
+ /**
+ * Schema linked service reference.
+ */
+ schemaLinkedService?: LinkedServiceReference;
}
/**
@@ -1945,6 +1972,14 @@ export interface DataFlowSource extends Transformation {
* Dataset reference.
*/
dataset?: DatasetReference;
+ /**
+ * Linked service reference.
+ */
+ linkedService?: LinkedServiceReference;
+ /**
+ * Schema linked service reference.
+ */
+ schemaLinkedService?: LinkedServiceReference;
}
/**
@@ -1986,6 +2021,98 @@ export interface MappingDataFlow {
script?: string;
}
+/**
+ * SharePoint Online List linked service.
+ */
+export interface SharePointOnlineListLinkedService {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "SharePointOnlineList";
+ /**
+ * The integration runtime reference.
+ */
+ connectVia?: IntegrationRuntimeReference;
+ /**
+ * Linked service description.
+ */
+ description?: string;
+ /**
+ * Parameters for linked service.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the linked service.
+ */
+ annotations?: any[];
+ /**
+ * The URL of the SharePoint Online site. For example,
+ * https://contoso.sharepoint.com/sites/siteName. Type: string (or Expression with resultType
+ * string).
+ */
+ siteUrl: any;
+ /**
+ * The tenant ID under which your application resides. You can find it on the Azure portal
+ * Active Directory overview page. Type: string (or Expression with resultType string).
+ */
+ tenantId: any;
+ /**
+ * The application (client) ID of your application registered in Azure Active Directory. Make
+ * sure to grant SharePoint site permission to this application. Type: string (or Expression with
+ * resultType string).
+ */
+ servicePrincipalId: any;
+ /**
+ * The client secret of your application registered in Azure Active Directory. Type: string (or
+ * Expression with resultType string).
+ */
+ servicePrincipalKey: SecretBaseUnion;
+ /**
+ * The encrypted credential used for authentication. Credentials are encrypted using the
+ * integration runtime credential manager. Type: string (or Expression with resultType string).
+ */
+ encryptedCredential?: any;
+}
+
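// Editor's illustrative sketch (not part of the patch): a
// SharePointOnlineListLinkedService literal; the URL, GUIDs and secret are
// placeholders, and SecureString is one of the SecretBaseUnion cases.
const exampleSpoLinkedService: SharePointOnlineListLinkedService = {
  type: "SharePointOnlineList",
  siteUrl: "https://contoso.sharepoint.com/sites/siteName",
  tenantId: "<tenant-guid>",
  servicePrincipalId: "<app-client-id>",
  servicePrincipalKey: { type: "SecureString", value: "<client-secret>" }
};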
+/**
+ * Snowflake linked service.
+ */
+export interface SnowflakeLinkedService {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "Snowflake";
+ /**
+ * The integration runtime reference.
+ */
+ connectVia?: IntegrationRuntimeReference;
+ /**
+ * Linked service description.
+ */
+ description?: string;
+ /**
+ * Parameters for linked service.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the linked service.
+ */
+ annotations?: any[];
+ /**
+ * The connection string of Snowflake. Type: string, SecureString.
+ */
+ connectionString: any;
+ /**
+ * The Azure key vault secret reference of password in connection string.
+ */
+ password?: AzureKeyVaultSecretReference;
+ /**
+ * The encrypted credential used for authentication. Credentials are encrypted using the
+ * integration runtime credential manager. Type: string (or Expression with resultType string).
+ */
+ encryptedCredential?: any;
+}
+
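// Editor's illustrative sketch (not part of the patch): a SnowflakeLinkedService
// literal resolving the password from Key Vault; the account, vault and secret
// names are placeholders.
const exampleSnowflakeLinkedService: SnowflakeLinkedService = {
  type: "Snowflake",
  connectionString: "jdbc:snowflake://<account>.snowflakecomputing.com/?db=MYDB&warehouse=WH",
  password: {
    type: "AzureKeyVaultSecret",
    store: { type: "LinkedServiceReference", referenceName: "MyKeyVault" },
    secretName: "snowflakePassword"
  }
};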
/**
* Azure Function linked service.
*/
@@ -4663,7 +4790,7 @@ export interface SapHanaLinkedService {
/**
* Host name of the SAP HANA server. Type: string (or Expression with resultType string).
*/
- server: any;
+ server?: any;
/**
* The authentication type to be used to connect to the SAP HANA server. Possible values include:
* 'Basic', 'Windows'
@@ -6759,7 +6886,7 @@ export interface AzureFileStorageLinkedService {
/**
* Host name of the server. Type: string (or Expression with resultType string).
*/
- host: any;
+ host?: any;
/**
* User ID to logon the server. Type: string (or Expression with resultType string).
*/
@@ -6768,6 +6895,29 @@ export interface AzureFileStorageLinkedService {
* Password to logon the server.
*/
password?: SecretBaseUnion;
+ /**
+ * The connection string. It is mutually exclusive with the sasUri property. Type: string,
+ * SecureString or AzureKeyVaultSecretReference.
+ */
+ connectionString?: any;
+ /**
+ * The Azure key vault secret reference of accountKey in connection string.
+ */
+ accountKey?: AzureKeyVaultSecretReference;
+ /**
+ * SAS URI of the Azure File resource. It is mutually exclusive with the connectionString property.
+ * Type: string, SecureString or AzureKeyVaultSecretReference.
+ */
+ sasUri?: any;
+ /**
+ * The Azure key vault secret reference of sasToken in the SAS URI.
+ */
+ sasToken?: AzureKeyVaultSecretReference;
+ /**
+ * The Azure file share name. It is required when authenticating with accountKey/sasToken. Type:
+ * string (or Expression with resultType string).
+ */
+ fileShare?: any;
/**
* The encrypted credential used for authentication. Credentials are encrypted using the
* integration runtime credential manager. Type: string (or Expression with resultType string).
@@ -7657,6 +7807,99 @@ export interface AzureStorageLinkedService {
encryptedCredential?: string;
}
+/**
+ * The sharepoint online list resource dataset.
+ */
+export interface SharePointOnlineListResourceDataset {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "SharePointOnlineListResource";
+ /**
+ * Dataset description.
+ */
+ description?: string;
+ /**
+ * Columns that define the structure of the dataset. Type: array (or Expression with resultType
+ * array), itemType: DatasetDataElement.
+ */
+ structure?: any;
+ /**
+ * Columns that define the physical type schema of the dataset. Type: array (or Expression with
+ * resultType array), itemType: DatasetSchemaDataElement.
+ */
+ schema?: any;
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName: LinkedServiceReference;
+ /**
+ * Parameters for dataset.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the Dataset.
+ */
+ annotations?: any[];
+ /**
+ * The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ */
+ folder?: DatasetFolder;
+ /**
+ * The name of the SharePoint Online list. Type: string (or Expression with resultType string).
+ */
+ listName?: any;
+}
+
+/**
+ * The snowflake dataset.
+ */
+export interface SnowflakeDataset {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "SnowflakeTable";
+ /**
+ * Dataset description.
+ */
+ description?: string;
+ /**
+ * Columns that define the structure of the dataset. Type: array (or Expression with resultType
+ * array), itemType: DatasetDataElement.
+ */
+ structure?: any;
+ /**
+ * Columns that define the physical type schema of the dataset. Type: array (or Expression with
+ * resultType array), itemType: DatasetSchemaDataElement.
+ */
+ schema?: any;
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName: LinkedServiceReference;
+ /**
+ * Parameters for dataset.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the Dataset.
+ */
+ annotations?: any[];
+ /**
+ * The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ */
+ folder?: DatasetFolder;
+ /**
+ * The schema name of the Snowflake database. Type: string (or Expression with resultType
+ * string).
+ */
+ snowflakeDatasetSchema?: any;
+ /**
+ * The table name of the Snowflake database. Type: string (or Expression with resultType string).
+ */
+ table?: any;
+}
+
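// Editor's illustrative sketch (not part of the patch): a SnowflakeDataset
// literal; the linked service name, schema and table are placeholders.
const exampleSnowflakeDataset: SnowflakeDataset = {
  type: "SnowflakeTable",
  linkedServiceName: { type: "LinkedServiceReference", referenceName: "MySnowflakeLS" },
  snowflakeDatasetSchema: "PUBLIC",
  table: "ORDERS"
};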
/**
* Google AdWords service dataset.
*/
@@ -12121,13 +12364,13 @@ export interface OrcDataset {
}
/**
- * Json dataset.
+ * Xml dataset.
*/
-export interface JsonDataset {
+export interface XmlDataset {
/**
* Polymorphic Discriminator
*/
- type: "Json";
+ type: "Xml";
/**
* Dataset description.
*/
@@ -12170,6 +12413,10 @@ export interface JsonDataset {
* resultType string).
*/
encodingName?: any;
+ /**
+ * The null value string. Type: string (or Expression with resultType string).
+ */
+ nullValue?: any;
/**
 * The data compression method used for the xml dataset.
*/
@@ -12177,13 +12424,13 @@ export interface JsonDataset {
}
/**
- * Delimited text dataset.
+ * Json dataset.
*/
-export interface DelimitedTextDataset {
+export interface JsonDataset {
/**
* Polymorphic Discriminator
*/
- type: "DelimitedText";
+ type: "Json";
/**
* Dataset description.
*/
@@ -12215,35 +12462,91 @@ export interface DelimitedTextDataset {
*/
folder?: DatasetFolder;
/**
- * The location of the delimited text storage.
+ * The location of the json data storage.
*/
location: DatasetLocationUnion;
/**
- * The column delimiter. Type: string (or Expression with resultType string).
+ * The code page name of the preferred encoding. If not specified, the default value is UTF-8,
+ * unless BOM denotes another Unicode encoding. Refer to the name column of the table in the
+ * following link to set supported values:
+ * https://msdn.microsoft.com/library/system.text.encoding.aspx. Type: string (or Expression with
+ * resultType string).
*/
- columnDelimiter?: any;
+ encodingName?: any;
/**
- * The row delimiter. Type: string (or Expression with resultType string).
+ * The data compression method used for the json dataset.
*/
- rowDelimiter?: any;
+ compression?: DatasetCompressionUnion;
+}
+
+/**
+ * Delimited text dataset.
+ */
+export interface DelimitedTextDataset {
/**
- * The code page name of the preferred encoding. If miss, the default value is UTF-8, unless BOM
- * denotes another Unicode encoding. Refer to the name column of the table in the following link
- * to set supported values: https://msdn.microsoft.com/library/system.text.encoding.aspx. Type:
- * string (or Expression with resultType string).
+ * Polymorphic Discriminator
*/
- encodingName?: any;
- compressionCodec?: any;
+ type: "DelimitedText";
/**
- * The data compression method used for DelimitedText.
+ * Dataset description.
*/
- compressionLevel?: any;
+ description?: string;
/**
- * The quote character. Type: string (or Expression with resultType string).
+ * Columns that define the structure of the dataset. Type: array (or Expression with resultType
+ * array), itemType: DatasetDataElement.
*/
- quoteChar?: any;
+ structure?: any;
/**
- * The escape character. Type: string (or Expression with resultType string).
+ * Columns that define the physical type schema of the dataset. Type: array (or Expression with
+ * resultType array), itemType: DatasetSchemaDataElement.
+ */
+ schema?: any;
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName: LinkedServiceReference;
+ /**
+ * Parameters for dataset.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the Dataset.
+ */
+ annotations?: any[];
+ /**
+ * The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ */
+ folder?: DatasetFolder;
+ /**
+ * The location of the delimited text storage.
+ */
+ location: DatasetLocationUnion;
+ /**
+ * The column delimiter. Type: string (or Expression with resultType string).
+ */
+ columnDelimiter?: any;
+ /**
+ * The row delimiter. Type: string (or Expression with resultType string).
+ */
+ rowDelimiter?: any;
+ /**
+ * The code page name of the preferred encoding. If not specified, the default value is UTF-8,
+ * unless BOM denotes another Unicode encoding. Refer to the name column of the table in the
+ * following link to set supported values:
+ * https://msdn.microsoft.com/library/system.text.encoding.aspx. Type: string (or Expression with
+ * resultType string).
+ */
+ encodingName?: any;
+ compressionCodec?: any;
+ /**
+ * The data compression method used for DelimitedText.
+ */
+ compressionLevel?: any;
+ /**
+ * The quote character. Type: string (or Expression with resultType string).
+ */
+ quoteChar?: any;
+ /**
+ * The escape character. Type: string (or Expression with resultType string).
*/
escapeChar?: any;
/**
@@ -12303,6 +12606,72 @@ export interface ParquetDataset {
compressionCodec?: any;
}
+/**
+ * Excel dataset.
+ */
+export interface ExcelDataset {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "Excel";
+ /**
+ * Dataset description.
+ */
+ description?: string;
+ /**
+ * Columns that define the structure of the dataset. Type: array (or Expression with resultType
+ * array), itemType: DatasetDataElement.
+ */
+ structure?: any;
+ /**
+ * Columns that define the physical type schema of the dataset. Type: array (or Expression with
+ * resultType array), itemType: DatasetSchemaDataElement.
+ */
+ schema?: any;
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName: LinkedServiceReference;
+ /**
+ * Parameters for dataset.
+ */
+ parameters?: { [propertyName: string]: ParameterSpecification };
+ /**
+ * List of tags that can be used for describing the Dataset.
+ */
+ annotations?: any[];
+ /**
+ * The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ */
+ folder?: DatasetFolder;
+ /**
+ * The location of the excel storage.
+ */
+ location: DatasetLocationUnion;
+ /**
+ * The sheet name of the excel file. Type: string (or Expression with resultType string).
+ */
+ sheetName: any;
+ /**
+ * The partial data of one sheet. Type: string (or Expression with resultType string).
+ */
+ range?: any;
+ /**
+ * When used as input, treat the first row of data as headers. When used as output, write the
+ * headers into the output as the first row of data. The default value is false. Type: boolean
+ * (or Expression with resultType boolean).
+ */
+ firstRowAsHeader?: any;
+ /**
+ * The data compression method used for the excel dataset.
+ */
+ compression?: DatasetCompressionUnion;
+ /**
+ * The null value string. Type: string (or Expression with resultType string).
+ */
+ nullValue?: any;
+}
+
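// Editor's illustrative sketch (not part of the patch): an ExcelDataset literal
// reading one sheet from a blob location; names are placeholders, and
// AzureBlobStorageLocation is one of the DatasetLocationUnion cases.
const exampleExcelDataset: ExcelDataset = {
  type: "Excel",
  linkedServiceName: { type: "LinkedServiceReference", referenceName: "MyBlobStorageLS" },
  location: { type: "AzureBlobStorageLocation", container: "data", fileName: "report.xlsx" },
  sheetName: "Sheet1",
  firstRowAsHeader: true
};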
/**
* Avro dataset.
*/
@@ -13502,164 +13871,165 @@ export interface AzureMLBatchExecutionActivity {
}
/**
- * Activity to get metadata of dataset
+ * Contains the possible cases for CompressionReadSettings.
*/
-export interface GetMetadataActivity {
+export type CompressionReadSettingsUnion = CompressionReadSettings | ZipDeflateReadSettings;
+
+/**
+ * Compression read settings.
+ */
+export interface CompressionReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "GetMetadata";
- /**
- * Activity name.
- */
- name: string;
- /**
- * Activity description.
- */
- description?: string;
- /**
- * Activity depends on condition.
- */
- dependsOn?: ActivityDependency[];
- /**
- * Activity user properties.
- */
- userProperties?: UserProperty[];
- /**
- * Linked service reference.
- */
- linkedServiceName?: LinkedServiceReference;
- /**
- * Activity policy.
- */
- policy?: ActivityPolicy;
- /**
- * GetMetadata activity dataset reference.
- */
- dataset: DatasetReference;
+ type: "CompressionReadSettings";
/**
- * Fields of metadata to get from dataset.
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
*/
- fieldList?: any[];
+ [property: string]: any;
}
/**
- * Web activity authentication properties.
+ * The ZipDeflate compression read settings.
*/
-export interface WebActivityAuthentication {
- /**
- * Web activity authentication (Basic/ClientCertificate/MSI)
- */
- type: string;
+export interface ZipDeflateReadSettings {
/**
- * Base64-encoded contents of a PFX file.
+ * Polymorphic Discriminator
*/
- pfx?: SecretBaseUnion;
+ type: "ZipDeflateReadSettings";
/**
- * Web activity authentication user name for basic authentication.
+ * Preserve the zip file name as folder path. Type: boolean (or Expression with resultType
+ * boolean).
*/
- username?: string;
+ preserveZipFileNameAsFolder?: any;
+}
+
+/**
+ * Contains the possible cases for FormatReadSettings.
+ */
+export type FormatReadSettingsUnion = FormatReadSettings | BinaryReadSettings | XmlReadSettings | JsonReadSettings | DelimitedTextReadSettings;
+
+/**
+ * Format read settings.
+ */
+export interface FormatReadSettings {
/**
- * Password for the PFX file or basic authentication.
+ * Polymorphic Discriminator
*/
- password?: SecretBaseUnion;
+ type: "FormatReadSettings";
/**
- * Resource for which Azure Auth token will be requested when using MSI Authentication.
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
*/
- resource?: string;
+ [property: string]: any;
}
/**
- * Web activity.
+ * Binary read settings.
*/
-export interface WebActivity {
+export interface BinaryReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "WebActivity";
- /**
- * Activity name.
- */
- name: string;
+ type: "BinaryReadSettings";
/**
- * Activity description.
+ * Compression settings.
*/
- description?: string;
+ compressionProperties?: CompressionReadSettingsUnion;
+}
+
+/**
+ * Xml read settings.
+ */
+export interface XmlReadSettings {
/**
- * Activity depends on condition.
+ * Polymorphic Discriminator
*/
- dependsOn?: ActivityDependency[];
+ type: "XmlReadSettings";
/**
- * Activity user properties.
+ * Compression settings.
*/
- userProperties?: UserProperty[];
+ compressionProperties?: CompressionReadSettingsUnion;
/**
- * Linked service reference.
+ * Indicates what validation method is used when reading the xml files. Allowed values: 'none',
+ * 'xsd', or 'dtd'. Type: string (or Expression with resultType string).
*/
- linkedServiceName?: LinkedServiceReference;
+ validationMode?: any;
/**
- * Activity policy.
+ * Namespace uri to prefix mappings to override the prefixes in column names when namespace is
+ * enabled. If no prefix is defined for a namespace uri, the prefix of the xml element/attribute
+ * name in the xml data file will be used. Example: "{"http://www.example.com/xml":"prefix"}"
+ * Type: object (or Expression with resultType object).
*/
- policy?: ActivityPolicy;
+ namespacePrefixes?: any;
+}
+
+/**
+ * Json read settings.
+ */
+export interface JsonReadSettings {
/**
- * Rest API method for target endpoint. Possible values include: 'GET', 'POST', 'PUT', 'DELETE'
+ * Polymorphic Discriminator
*/
- method: WebActivityMethod;
+ type: "JsonReadSettings";
/**
- * Web activity target endpoint and path. Type: string (or Expression with resultType string).
+ * Compression settings.
*/
- url: any;
+ compressionProperties?: CompressionReadSettingsUnion;
+}
+
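// Editor's illustrative sketch (not part of the patch): composing the new read
// settings, e.g. unzipping JSON input without preserving the zip file name as a
// folder.
const exampleJsonReadSettings: JsonReadSettings = {
  type: "JsonReadSettings",
  compressionProperties: {
    type: "ZipDeflateReadSettings",
    preserveZipFileNameAsFolder: false
  }
};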
+/**
+ * Delimited text read settings.
+ */
+export interface DelimitedTextReadSettings {
/**
- * Represents the headers that will be sent to the request. For example, to set the language and
- * type on a request: "headers" : { "Accept-Language": "en-us", "Content-Type":
- * "application/json" }. Type: string (or Expression with resultType string).
+ * Polymorphic Discriminator
*/
- headers?: any;
+ type: "DelimitedTextReadSettings";
/**
- * Represents the payload that will be sent to the endpoint. Required for POST/PUT method, not
- * allowed for GET method Type: string (or Expression with resultType string).
+ * Indicates the number of non-empty rows to skip when reading data from input files. Type:
+ * integer (or Expression with resultType integer).
*/
- body?: any;
+ skipLineCount?: any;
/**
- * Authentication method used for calling the endpoint.
+ * Compression settings.
*/
- authentication?: WebActivityAuthentication;
+ compressionProperties?: CompressionReadSettingsUnion;
+}
+
+/**
+ * Distcp settings.
+ */
+export interface DistcpSettings {
/**
- * List of datasets passed to web endpoint.
+ * Specifies the Yarn ResourceManager endpoint. Type: string (or Expression with resultType
+ * string).
*/
- datasets?: DatasetReference[];
+ resourceManagerEndpoint: any;
/**
- * List of linked services passed to web endpoint.
+ * Specifies an existing folder path used to store the temp Distcp command script. The script
+ * file is generated by ADF and will be removed after the Copy job finishes. Type: string (or
+ * Expression with resultType string).
*/
- linkedServices?: LinkedServiceReference[];
+ tempScriptPath: any;
/**
- * The integration runtime reference.
+ * Specifies the Distcp options. Type: string (or Expression with resultType string).
*/
- connectVia?: IntegrationRuntimeReference;
+ distcpOptions?: any;
}
/**
- * Contains the possible cases for CopySource.
+ * Contains the possible cases for StoreReadSettings.
*/
-export type CopySourceUnion = CopySource | HttpSource | AzureBlobFSSource | AzureDataLakeStoreSource | Office365Source | CosmosDbMongoDbApiSource | MongoDbV2Source | MongoDbSource | WebSource | OracleSource | AzureDataExplorerSource | HdfsSource | FileSystemSource | RestSource | SalesforceServiceCloudSource | ODataSource | MicrosoftAccessSource | RelationalSource | CommonDataServiceForAppsSource | DynamicsCrmSource | DynamicsSource | CosmosDbSqlApiSource | DocumentDbCollectionSource | BlobSource | TabularSourceUnion | BinarySource | OrcSource | JsonSource | DelimitedTextSource | ParquetSource | AvroSource;
+export type StoreReadSettingsUnion = StoreReadSettings | HdfsReadSettings | HttpReadSettings | SftpReadSettings | FtpReadSettings | GoogleCloudStorageReadSettings | AzureFileStorageReadSettings | FileServerReadSettings | AmazonS3ReadSettings | AzureDataLakeStoreReadSettings | AzureBlobFSReadSettings | AzureBlobStorageReadSettings;
/**
- * A copy activity source.
+ * Connector read setting.
*/
-export interface CopySource {
+export interface StoreReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "CopySource";
- /**
- * Source retry count. Type: integer (or Expression with resultType integer).
- */
- sourceRetryCount?: any;
- /**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
- */
- sourceRetryWait?: any;
+ type: "StoreReadSettings";
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
@@ -13672,447 +14042,815 @@ export interface CopySource {
}
/**
- * A copy activity source for an HTTP file.
+ * HDFS read settings.
*/
-export interface HttpSource {
+export interface HdfsReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "HttpSource";
- /**
- * Source retry count. Type: integer (or Expression with resultType integer).
- */
- sourceRetryCount?: any;
- /**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
- */
- sourceRetryWait?: any;
+ type: "HdfsReadSettings";
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
/**
- * Specifies the timeout for a HTTP client to get HTTP response from HTTP server. The default
- * value is equivalent to System.Net.HttpWebRequest.Timeout. Type: string (or Expression with
- * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- httpRequestTimeout?: any;
-}
-
-/**
- * A copy activity Azure BlobFS source.
- */
-export interface AzureBlobFSSource {
+ recursive?: any;
/**
- * Polymorphic Discriminator
+ * HDFS wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- type: "AzureBlobFSSource";
+ wildcardFolderPath?: any;
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * HDFS wildcardFileName. Type: string (or Expression with resultType string).
*/
- sourceRetryCount?: any;
+ wildcardFileName?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
*/
- sourceRetryWait?: any;
+ fileListPath?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Indicates whether to enable partition discovery.
*/
- maxConcurrentConnections?: any;
+ enablePartitionDiscovery?: boolean;
/**
- * Treat empty as null. Type: boolean (or Expression with resultType boolean).
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
*/
- treatEmptyAsNull?: any;
+ partitionRootPath?: any;
/**
- * Number of header lines to skip from each blob. Type: integer (or Expression with resultType
- * integer).
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
*/
- skipHeaderLineCount?: any;
+ modifiedDatetimeStart?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
*/
- recursive?: any;
+ modifiedDatetimeEnd?: any;
+ /**
+ * Specifies Distcp-related settings.
+ */
+ distcpSettings?: DistcpSettings;
}
/**
- * A copy activity Azure Data Lake source.
+ * Http read settings.
*/
-export interface AzureDataLakeStoreSource {
+export interface HttpReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "AzureDataLakeStoreSource";
- /**
- * Source retry count. Type: integer (or Expression with resultType integer).
- */
- sourceRetryCount?: any;
- /**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
- */
- sourceRetryWait?: any;
+ type: "HttpReadSettings";
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
-}
-
-/**
- * A copy activity source for an Office 365 service.
- */
-export interface Office365Source {
- /**
- * Polymorphic Discriminator
+ * The HTTP method used to call the RESTful API. The default is GET. Type: string (or Expression
+ * with resultType string).
*/
- type: "Office365Source";
+ requestMethod?: any;
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The HTTP request body to the RESTful API if requestMethod is POST. Type: string (or Expression
+ * with resultType string).
*/
- sourceRetryCount?: any;
+ requestBody?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * The additional HTTP headers in the request to the RESTful API. Type: string (or Expression
+ * with resultType string).
*/
- sourceRetryWait?: any;
+ additionalHeaders?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Specifies the timeout for an HTTP client to get an HTTP response from the HTTP server.
*/
- maxConcurrentConnections?: any;
+ requestTimeout?: any;
/**
- * The groups containing all the users. Type: array of strings (or Expression with resultType
- * array of strings).
+ * Indicates whether to enable partition discovery.
*/
- allowedGroups?: any;
+ enablePartitionDiscovery?: boolean;
/**
- * The user scope uri. Type: string (or Expression with resultType string).
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
*/
- userScopeFilterUri?: any;
+ partitionRootPath?: any;
+}
+
+/**
+ * Sftp read settings.
+ */
+export interface SftpReadSettings {
/**
- * The Column to apply the and . Type:
- * string (or Expression with resultType string).
+ * Polymorphic Discriminator
*/
- dateFilterColumn?: any;
+ type: "SftpReadSettings";
/**
- * Start time of the requested range for this dataset. Type: string (or Expression with
- * resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- startTime?: any;
+ maxConcurrentConnections?: any;
/**
- * End time of the requested range for this dataset. Type: string (or Expression with resultType
- * string).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- endTime?: any;
+ recursive?: any;
/**
- * The columns to be read out from the Office 365 table. Type: array of objects (or Expression
- * with resultType array of objects). Example: [ { "name": "Id" }, { "name": "CreatedDateTime" }
- * ]
+ * Sftp wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- outputColumns?: any;
-}
-
-/**
- * Specify the column name and value of additional columns.
- */
-export interface AdditionalColumns {
+ wildcardFolderPath?: any;
/**
- * Additional column name. Type: string (or Expression with resultType string).
+ * Sftp wildcardFileName. Type: string (or Expression with resultType string).
*/
- name?: any;
+ wildcardFileName?: any;
/**
- * Additional column value. Type: string (or Expression with resultType string).
+ * Indicates whether to enable partition discovery.
*/
- value?: any;
-}
-
-/**
- * Cursor methods for Mongodb query
- */
-export interface MongoDbCursorMethodsProperties {
+ enablePartitionDiscovery?: boolean;
/**
- * Specifies the fields to return in the documents that match the query filter. To return all
- * fields in the matching documents, omit this parameter. Type: string (or Expression with
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
* resultType string).
*/
- project?: any;
+ partitionRootPath?: any;
/**
- * Specifies the order in which the query returns matching documents. Type: string (or Expression
- * with resultType string). Type: string (or Expression with resultType string).
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
*/
- sort?: any;
+ fileListPath?: any;
/**
- * Specifies the how many documents skipped and where MongoDB begins returning results. This
- * approach may be useful in implementing paginated results. Type: integer (or Expression with
- * resultType integer).
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
*/
- skip?: any;
+ deleteFilesAfterCompletion?: any;
/**
- * Specifies the maximum number of documents the server returns. limit() is analogous to the
- * LIMIT statement in a SQL database. Type: integer (or Expression with resultType integer).
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
*/
- limit?: any;
+ modifiedDatetimeStart?: any;
/**
- * Describes unknown properties. The value of an unknown property can be of "any" type.
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
*/
- [property: string]: any;
+ modifiedDatetimeEnd?: any;
}
/**
- * A copy activity source for a CosmosDB (MongoDB API) database.
+ * Ftp read settings.
*/
-export interface CosmosDbMongoDbApiSource {
+export interface FtpReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "CosmosDbMongoDbApiSource";
+ type: "FtpReadSettings";
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- sourceRetryCount?: any;
+ maxConcurrentConnections?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- sourceRetryWait?: any;
+ recursive?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Ftp wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ wildcardFolderPath?: any;
/**
- * Specifies selection filter using query operators. To return all documents in a collection,
- * omit this parameter or pass an empty document ({}). Type: string (or Expression with
- * resultType string).
+ * Ftp wildcardFileName. Type: string (or Expression with resultType string).
*/
- filter?: any;
+ wildcardFileName?: any;
/**
- * Cursor methods for Mongodb query.
+ * Indicates whether to enable partition discovery.
*/
- cursorMethods?: MongoDbCursorMethodsProperties;
+ enablePartitionDiscovery?: boolean;
/**
- * Specifies the number of documents to return in each batch of the response from MongoDB
- * instance. In most cases, modifying the batch size will not affect the user or the application.
- * This property's main purpose is to avoid hit the limitation of response size. Type: integer
- * (or Expression with resultType integer).
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
*/
- batchSize?: any;
+ partitionRootPath?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
*/
- queryTimeout?: any;
+ deleteFilesAfterCompletion?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
*/
- additionalColumns?: AdditionalColumns[];
+ fileListPath?: any;
+ /**
+ * Specify whether to use binary transfer mode for FTP stores.
+ */
+ useBinaryTransfer?: boolean;
}
/**
- * A copy activity source for a MongoDB database.
+ * Google Cloud Storage read settings.
*/
-export interface MongoDbV2Source {
+export interface GoogleCloudStorageReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "MongoDbV2Source";
+ type: "GoogleCloudStorageReadSettings";
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- sourceRetryCount?: any;
+ maxConcurrentConnections?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- sourceRetryWait?: any;
+ recursive?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Google Cloud Storage wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ wildcardFolderPath?: any;
/**
- * Specifies selection filter using query operators. To return all documents in a collection,
- * omit this parameter or pass an empty document ({}). Type: string (or Expression with
+ * Google Cloud Storage wildcardFileName. Type: string (or Expression with resultType string).
+ */
+ wildcardFileName?: any;
+ /**
+ * The prefix filter for the Google Cloud Storage object name. Type: string (or Expression with
* resultType string).
*/
- filter?: any;
+ prefix?: any;
/**
- * Cursor methods for Mongodb query
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
*/
- cursorMethods?: MongoDbCursorMethodsProperties;
+ fileListPath?: any;
/**
- * Specifies the number of documents to return in each batch of the response from MongoDB
- * instance. In most cases, modifying the batch size will not affect the user or the application.
- * This property's main purpose is to avoid hit the limitation of response size. Type: integer
- * (or Expression with resultType integer).
+ * Indicates whether to enable partition discovery.
*/
- batchSize?: any;
+ enablePartitionDiscovery?: boolean;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
*/
- queryTimeout?: any;
+ partitionRootPath?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
*/
- additionalColumns?: AdditionalColumns[];
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
}
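
As a usage sketch (values are illustrative assumptions, types as declared in this file), a Google Cloud Storage read-settings object could be built like this:

const gcsRead: GoogleCloudStorageReadSettings = {
  type: "GoogleCloudStorageReadSettings",
  recursive: true,                               // walk sub-folders under the folder path
  wildcardFolderPath: "raw/2020/*",              // hypothetical folder pattern
  wildcardFileName: "*.csv",
  modifiedDatetimeStart: "2020-06-01T00:00:00Z", // illustrative datetime window
  modifiedDatetimeEnd: "2020-06-16T00:00:00Z"
};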
/**
- * A copy activity source for a MongoDB database.
+ * Azure File Storage read settings.
*/
-export interface MongoDbSource {
+export interface AzureFileStorageReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "MongoDbSource";
+ type: "AzureFileStorageReadSettings";
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- sourceRetryCount?: any;
+ maxConcurrentConnections?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- sourceRetryWait?: any;
+ recursive?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Azure File Storage wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ wildcardFolderPath?: any;
/**
- * Database query. Should be a SQL-92 query expression. Type: string (or Expression with
+ * Azure File Storage wildcardFileName. Type: string (or Expression with resultType string).
+ */
+ wildcardFileName?: any;
+ /**
+   * The prefix filter for the Azure File name, starting from the root path. Type: string (or
+ * with resultType string).
+ */
+ prefix?: any;
+ /**
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ */
+ fileListPath?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
* resultType string).
*/
- query?: any;
+ partitionRootPath?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
*/
- additionalColumns?: AdditionalColumns[];
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
}
/**
- * A copy activity source for web page table.
+ * File server read settings.
*/
-export interface WebSource {
+export interface FileServerReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "WebSource";
+ type: "FileServerReadSettings";
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- sourceRetryCount?: any;
+ maxConcurrentConnections?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- sourceRetryWait?: any;
+ recursive?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * FileServer wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ wildcardFolderPath?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * FileServer wildcardFileName. Type: string (or Expression with resultType string).
*/
- additionalColumns?: AdditionalColumns[];
+ wildcardFileName?: any;
+ /**
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ */
+ fileListPath?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
+ */
+ partitionRootPath?: any;
+ /**
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
+ */
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
+ /**
+ * Specify a filter to be used to select a subset of files in the folderPath rather than all
+ * files. Type: string (or Expression with resultType string).
+ */
+ fileFilter?: any;
}
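
FileServerReadSettings extends the common read-settings shape with a fileFilter; a minimal sketch, assuming a hypothetical filter pattern:

const fileServerRead: FileServerReadSettings = {
  type: "FileServerReadSettings",
  recursive: false,
  fileFilter: "report_*.txt",       // select a subset of files under folderPath
  deleteFilesAfterCompletion: false // keep source files after the copy finishes
};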
/**
- * The settings that will be leveraged for Oracle source partitioning.
+ * Amazon S3 read settings.
*/
-export interface OraclePartitionSettings {
+export interface AmazonS3ReadSettings {
/**
- * Names of the physical partitions of Oracle table.
+ * Polymorphic Discriminator
*/
- partitionNames?: any;
+ type: "AmazonS3ReadSettings";
/**
- * The name of the column in integer type that will be used for proceeding range partitioning.
- * Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- partitionColumnName?: any;
+ maxConcurrentConnections?: any;
/**
- * The maximum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- partitionUpperBound?: any;
+ recursive?: any;
/**
- * The minimum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
+ * AmazonS3 wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- partitionLowerBound?: any;
+ wildcardFolderPath?: any;
+ /**
+ * AmazonS3 wildcardFileName. Type: string (or Expression with resultType string).
+ */
+ wildcardFileName?: any;
+ /**
+ * The prefix filter for the S3 object name. Type: string (or Expression with resultType string).
+ */
+ prefix?: any;
+ /**
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ */
+ fileListPath?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
+ */
+ partitionRootPath?: any;
+ /**
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
+ */
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
}
/**
- * A copy activity Oracle source.
+ * Azure data lake store read settings.
*/
-export interface OracleSource {
+export interface AzureDataLakeStoreReadSettings {
/**
* Polymorphic Discriminator
*/
- type: "OracleSource";
+ type: "AzureDataLakeStoreReadSettings";
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- sourceRetryCount?: any;
+ maxConcurrentConnections?: any;
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- sourceRetryWait?: any;
+ recursive?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * ADLS wildcardFolderPath. Type: string (or Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ wildcardFolderPath?: any;
/**
- * Oracle reader query. Type: string (or Expression with resultType string).
+ * ADLS wildcardFileName. Type: string (or Expression with resultType string).
*/
- oracleReaderQuery?: any;
+ wildcardFileName?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ fileListPath?: any;
/**
- * The partition mechanism that will be used for Oracle read in parallel. Possible values
- * include: 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
+ * Lists files after the value (exclusive) based on file/folder names’ lexicographical order.
+   * Applies under the folderPath in the dataset, and filters files/sub-folders under the
+   * folderPath.
+ * Type: string (or Expression with resultType string).
*/
- partitionOption?: OraclePartitionOption;
+ listAfter?: any;
+ /**
+ * Lists files before the value (inclusive) based on file/folder names’ lexicographical order.
+   * Applies under the folderPath in the dataset, and filters files/sub-folders under the
+   * folderPath.
+ * Type: string (or Expression with resultType string).
+ */
+ listBefore?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
+ */
+ partitionRootPath?: any;
+ /**
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
+ */
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
+}
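
listAfter and listBefore together describe a lexicographical window over names under the folderPath; for example, this sketch selects names strictly after "part-0100" up to and including "part-0200" (names are placeholders):

const adlsRead: AzureDataLakeStoreReadSettings = {
  type: "AzureDataLakeStoreReadSettings",
  listAfter: "part-0100",  // exclusive lower bound by lexicographical order
  listBefore: "part-0200"  // inclusive upper bound
};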
+
+/**
+ * Azure blobFS read settings.
+ */
+export interface AzureBlobFSReadSettings {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "AzureBlobFSReadSettings";
+ /**
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
+ */
+ maxConcurrentConnections?: any;
+ /**
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
+ */
+ recursive?: any;
+ /**
+ * Azure blobFS wildcardFolderPath. Type: string (or Expression with resultType string).
+ */
+ wildcardFolderPath?: any;
+ /**
+ * Azure blobFS wildcardFileName. Type: string (or Expression with resultType string).
+ */
+ wildcardFileName?: any;
+ /**
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ */
+ fileListPath?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
+ */
+ partitionRootPath?: any;
+ /**
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
+ */
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
+}
+
+/**
+ * Azure blob read settings.
+ */
+export interface AzureBlobStorageReadSettings {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "AzureBlobStorageReadSettings";
+ /**
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
+ */
+ maxConcurrentConnections?: any;
+ /**
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
+ */
+ recursive?: any;
+ /**
+ * Azure blob wildcardFolderPath. Type: string (or Expression with resultType string).
+ */
+ wildcardFolderPath?: any;
+ /**
+ * Azure blob wildcardFileName. Type: string (or Expression with resultType string).
+ */
+ wildcardFileName?: any;
+ /**
+ * The prefix filter for the Azure Blob name. Type: string (or Expression with resultType
+ * string).
+ */
+ prefix?: any;
+ /**
+ * Point to a text file that lists each file (relative path to the path configured in the
+ * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ */
+ fileListPath?: any;
+ /**
+ * Indicates whether to enable partition discovery.
+ */
+ enablePartitionDiscovery?: boolean;
+ /**
+ * Specify the root path where partition discovery starts from. Type: string (or Expression with
+ * resultType string).
+ */
+ partitionRootPath?: any;
+ /**
+ * Indicates whether the source files need to be deleted after copy completion. Default is false.
+ * Type: boolean (or Expression with resultType boolean).
+ */
+ deleteFilesAfterCompletion?: any;
+ /**
+ * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeStart?: any;
+ /**
+ * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ */
+ modifiedDatetimeEnd?: any;
+}
+
+/**
+ * Activity to get metadata of a dataset.
+ */
+export interface GetMetadataActivity {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "GetMetadata";
+ /**
+ * Activity name.
+ */
+ name: string;
+ /**
+ * Activity description.
+ */
+ description?: string;
+ /**
+ * Activity depends on condition.
+ */
+ dependsOn?: ActivityDependency[];
+ /**
+ * Activity user properties.
+ */
+ userProperties?: UserProperty[];
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName?: LinkedServiceReference;
+ /**
+ * Activity policy.
+ */
+ policy?: ActivityPolicy;
+ /**
+ * GetMetadata activity dataset reference.
+ */
+ dataset: DatasetReference;
+ /**
+ * Fields of metadata to get from dataset.
+ */
+ fieldList?: any[];
+ /**
+ * GetMetadata activity store settings.
+ */
+ storeSettings?: StoreReadSettingsUnion;
+ /**
+ * GetMetadata activity format settings.
+ */
+ formatSettings?: FormatReadSettingsUnion;
+}
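
A GetMetadata activity ties together a dataset reference, a field list, and optionally one of the read-settings shapes above. A sketch, assuming the dataset name is hypothetical, the reference shape is abbreviated behind a cast, and AzureBlobStorageReadSettings is a member of StoreReadSettingsUnion:

const getFolderMetadata: GetMetadataActivity = {
  type: "GetMetadata",
  name: "GetFolderMetadata",
  dataset: { referenceName: "MyBinaryDataset" } as DatasetReference, // abbreviated reference
  fieldList: ["childItems", "lastModified"],                         // metadata fields to fetch
  storeSettings: {
    type: "AzureBlobStorageReadSettings",
    recursive: true
  }
};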
+
+/**
+ * Web activity authentication properties.
+ */
+export interface WebActivityAuthentication {
+ /**
+   * Web activity authentication (Basic/ClientCertificate/MSI).
+ */
+ type: string;
+ /**
+ * Base64-encoded contents of a PFX file.
+ */
+ pfx?: SecretBaseUnion;
+ /**
+ * Web activity authentication user name for basic authentication.
+ */
+ username?: string;
+ /**
+ * Password for the PFX file or basic authentication.
+ */
+ password?: SecretBaseUnion;
+ /**
+ * Resource for which Azure Auth token will be requested when using MSI Authentication.
+ */
+ resource?: string;
+}
+
+/**
+ * Web activity.
+ */
+export interface WebActivity {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "WebActivity";
+ /**
+ * Activity name.
+ */
+ name: string;
+ /**
+ * Activity description.
+ */
+ description?: string;
+ /**
+ * Activity depends on condition.
+ */
+ dependsOn?: ActivityDependency[];
+ /**
+ * Activity user properties.
+ */
+ userProperties?: UserProperty[];
+ /**
+ * Linked service reference.
+ */
+ linkedServiceName?: LinkedServiceReference;
+ /**
+ * Activity policy.
+ */
+ policy?: ActivityPolicy;
+ /**
+ * Rest API method for target endpoint. Possible values include: 'GET', 'POST', 'PUT', 'DELETE'
+ */
+ method: WebActivityMethod;
+ /**
+ * Web activity target endpoint and path. Type: string (or Expression with resultType string).
+ */
+ url: any;
+ /**
+ * Represents the headers that will be sent to the request. For example, to set the language and
+ * type on a request: "headers" : { "Accept-Language": "en-us", "Content-Type":
+ * "application/json" }. Type: string (or Expression with resultType string).
+ */
+ headers?: any;
+ /**
+   * Represents the payload that will be sent to the endpoint. Required for the POST/PUT methods,
+   * not allowed for the GET method. Type: string (or Expression with resultType string).
+ */
+ body?: any;
+ /**
+ * Authentication method used for calling the endpoint.
+ */
+ authentication?: WebActivityAuthentication;
+ /**
+ * List of datasets passed to web endpoint.
+ */
+ datasets?: DatasetReference[];
+ /**
+ * List of linked services passed to web endpoint.
+ */
+ linkedServices?: LinkedServiceReference[];
+ /**
+ * The integration runtime reference.
+ */
+ connectVia?: IntegrationRuntimeReference;
+}
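
A sketch of a WebActivity that calls an endpoint with MSI authentication; the URL and token resource are placeholders:

const callHealthCheck: WebActivity = {
  type: "WebActivity",
  name: "CallHealthCheck",
  method: "GET",                               // one of GET/POST/PUT/DELETE
  url: "https://example.com/api/health",       // placeholder endpoint
  authentication: {
    type: "MSI",
    resource: "https://management.azure.com/"  // illustrative token audience
  }
};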
+
+/**
+ * Contains the possible cases for CopySource.
+ */
+export type CopySourceUnion = CopySource | SharePointOnlineListSource | SnowflakeSource | HttpSource | AzureBlobFSSource | AzureDataLakeStoreSource | Office365Source | CosmosDbMongoDbApiSource | MongoDbV2Source | MongoDbSource | WebSource | OracleSource | AzureDataExplorerSource | HdfsSource | FileSystemSource | RestSource | SalesforceServiceCloudSource | ODataSource | MicrosoftAccessSource | RelationalSource | CommonDataServiceForAppsSource | DynamicsCrmSource | DynamicsSource | CosmosDbSqlApiSource | DocumentDbCollectionSource | BlobSource | TabularSourceUnion | BinarySource | OrcSource | XmlSource | JsonSource | DelimitedTextSource | ParquetSource | ExcelSource | AvroSource;
+
+/**
+ * A copy activity source.
+ */
+export interface CopySource {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "CopySource";
+ /**
+ * Source retry count. Type: integer (or Expression with resultType integer).
+ */
+ sourceRetryCount?: any;
+ /**
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sourceRetryWait?: any;
/**
- * The settings that will be leveraged for Oracle source partitioning.
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- partitionSettings?: OraclePartitionSettings;
+ maxConcurrentConnections?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
*/
- additionalColumns?: AdditionalColumns[];
+ [property: string]: any;
}
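
Because every member of CopySourceUnion carries a literal type tag, TypeScript can narrow the union on that discriminator; a small sketch:

function describeSource(source: CopySourceUnion): string {
  switch (source.type) {
    case "SnowflakeSource":
      return `Snowflake query: ${source.query}`;           // narrowed to SnowflakeSource
    case "HttpSource":
      return `HTTP timeout: ${source.httpRequestTimeout}`; // narrowed to HttpSource
    default:
      return `source of type ${source.type}`;
  }
}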
/**
- * A copy activity Azure Data Explorer (Kusto) source.
+ * A copy activity source for a SharePoint Online list.
*/
-export interface AzureDataExplorerSource {
+export interface SharePointOnlineListSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureDataExplorerSource";
+ type: "SharePointOnlineListSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14128,56 +14866,67 @@ export interface AzureDataExplorerSource {
*/
maxConcurrentConnections?: any;
/**
- * Database query. Should be a Kusto Query Language (KQL) query. Type: string (or Expression with
- * resultType string).
+   * The OData query to filter the data in the SharePoint Online list. For example, "$top=1".
+   * Type:
+ * string (or Expression with resultType string).
*/
- query: any;
+ query?: any;
/**
- * The name of the Boolean option that controls whether truncation is applied to result-sets that
- * go beyond a certain row-count limit.
+ * The wait time to get a response from SharePoint Online. Default value is 5 minutes (00:05:00).
+ * Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- noTruncation?: any;
+ httpRequestTimeout?: any;
+}
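
For example, a sketch of a SharePoint Online list source that keeps the first 100 items and makes the documented default timeout explicit:

const spListSource: SharePointOnlineListSource = {
  type: "SharePointOnlineListSource",
  query: "$top=100",             // OData filter over the list
  httpRequestTimeout: "00:05:00" // the documented default wait time
};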
+
+/**
+ * Contains the possible cases for ExportSettings.
+ */
+export type ExportSettingsUnion = ExportSettings | SnowflakeExportCopyCommand;
+
+/**
+ * Export command settings.
+ */
+export interface ExportSettings {
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))..
+ * Polymorphic Discriminator
*/
- queryTimeout?: any;
+ type: "ExportSettings";
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
*/
- additionalColumns?: AdditionalColumns[];
+ [property: string]: any;
}
/**
- * Distcp settings.
+ * Snowflake export command settings.
*/
-export interface DistcpSettings {
+export interface SnowflakeExportCopyCommand {
/**
- * Specifies the Yarn ResourceManager endpoint. Type: string (or Expression with resultType
- * string).
+ * Polymorphic Discriminator
*/
- resourceManagerEndpoint: any;
+ type: "SnowflakeExportCopyCommand";
/**
- * Specifies an existing folder path which will be used to store temp Distcp command script. The
- * script file is generated by ADF and will be removed after Copy job finished. Type: string (or
- * Expression with resultType string).
+   * Additional copy options directly passed to the Snowflake COPY command. Type: key-value pairs
+ * (value should be string type) (or Expression with resultType object). Example:
+ * "additionalCopyOptions": { "DATE_FORMAT": "MM/DD/YYYY", "TIME_FORMAT": "'HH24:MI:SS.FF'" }
*/
- tempScriptPath: any;
+ additionalCopyOptions?: { [propertyName: string]: any };
/**
- * Specifies the Distcp options. Type: string (or Expression with resultType string).
+   * Additional format options directly passed to the Snowflake COPY command. Type: key-value
+   * pairs
+ * (value should be string type) (or Expression with resultType object). Example:
+ * "additionalFormatOptions": { "OVERWRITE": "TRUE", "MAX_FILE_SIZE": "'FALSE'" }
*/
- distcpOptions?: any;
+ additionalFormatOptions?: { [propertyName: string]: any };
}
/**
- * A copy activity HDFS source.
+ * A copy activity Snowflake source.
*/
-export interface HdfsSource {
+export interface SnowflakeSource {
/**
* Polymorphic Discriminator
*/
- type: "HdfsSource";
+ type: "SnowflakeSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14193,24 +14942,23 @@ export interface HdfsSource {
*/
maxConcurrentConnections?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
+   * Snowflake SQL query. Type: string (or Expression with resultType string).
*/
- recursive?: any;
+ query?: any;
/**
- * Specifies Distcp-related settings.
+ * Snowflake export settings.
*/
- distcpSettings?: DistcpSettings;
+ exportSettings?: SnowflakeExportCopyCommand;
}
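
Putting the two Snowflake shapes together, a source with export settings could be sketched as follows; the query is a placeholder and the options mirror the examples in the comments above:

const snowflakeSource: SnowflakeSource = {
  type: "SnowflakeSource",
  query: "SELECT * FROM ORDERS", // placeholder Snowflake SQL
  exportSettings: {
    type: "SnowflakeExportCopyCommand",
    additionalCopyOptions: { DATE_FORMAT: "MM/DD/YYYY", TIME_FORMAT: "'HH24:MI:SS.FF'" },
    additionalFormatOptions: { OVERWRITE: "TRUE", MAX_FILE_SIZE: "'FALSE'" }
  }
};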
/**
- * A copy activity file system source.
+ * A copy activity source for an HTTP file.
*/
-export interface FileSystemSource {
+export interface HttpSource {
/**
* Polymorphic Discriminator
*/
- type: "FileSystemSource";
+ type: "HttpSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14226,25 +14974,21 @@ export interface FileSystemSource {
*/
maxConcurrentConnections?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
- /**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+   * Specifies the timeout for an HTTP client to get an HTTP response from the HTTP server. The
+   * default
+ * value is equivalent to System.Net.HttpWebRequest.Timeout. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- additionalColumns?: AdditionalColumns[];
+ httpRequestTimeout?: any;
}
/**
- * A copy activity Rest service source.
+ * A copy activity Azure BlobFS source.
*/
-export interface RestSource {
+export interface AzureBlobFSSource {
/**
* Polymorphic Discriminator
*/
- type: "RestSource";
+ type: "AzureBlobFSSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14260,50 +15004,29 @@ export interface RestSource {
*/
maxConcurrentConnections?: any;
/**
- * The HTTP method used to call the RESTful API. The default is GET. Type: string (or Expression
- * with resultType string).
- */
- requestMethod?: any;
- /**
- * The HTTP request body to the RESTful API if requestMethod is POST. Type: string (or Expression
- * with resultType string).
- */
- requestBody?: any;
- /**
- * The additional HTTP headers in the request to the RESTful API. Type: string (or Expression
- * with resultType string).
- */
- additionalHeaders?: any;
- /**
- * The pagination rules to compose next page requests. Type: string (or Expression with
- * resultType string).
- */
- paginationRules?: any;
- /**
- * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
- * timeout to read response data. Default value: 00:01:40. Type: string (or Expression with
- * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Treat empty as null. Type: boolean (or Expression with resultType boolean).
*/
- httpRequestTimeout?: any;
+ treatEmptyAsNull?: any;
/**
- * The time to await before sending next page request.
+ * Number of header lines to skip from each blob. Type: integer (or Expression with resultType
+ * integer).
*/
- requestInterval?: any;
+ skipHeaderLineCount?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- additionalColumns?: AdditionalColumns[];
+ recursive?: any;
}
/**
- * A copy activity Salesforce Service Cloud source.
+ * A copy activity Azure Data Lake source.
*/
-export interface SalesforceServiceCloudSource {
+export interface AzureDataLakeStoreSource {
/**
* Polymorphic Discriminator
*/
- type: "SalesforceServiceCloudSource";
+ type: "AzureDataLakeStoreSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14319,29 +15042,20 @@ export interface SalesforceServiceCloudSource {
*/
maxConcurrentConnections?: any;
/**
- * Database query. Type: string (or Expression with resultType string).
- */
- query?: any;
- /**
- * The read behavior for the operation. Default is Query. Possible values include: 'Query',
- * 'QueryAll'
- */
- readBehavior?: SalesforceSourceReadBehavior;
- /**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- additionalColumns?: AdditionalColumns[];
+ recursive?: any;
}
/**
- * A copy activity source for OData source.
+ * A copy activity source for an Office 365 service.
*/
-export interface ODataSource {
+export interface Office365Source {
/**
* Polymorphic Discriminator
*/
- type: "ODataSource";
+ type: "Office365Source";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14357,57 +15071,91 @@ export interface ODataSource {
*/
maxConcurrentConnections?: any;
/**
- * OData query. For example, "$top=1". Type: string (or Expression with resultType string).
+ * The groups containing all the users. Type: array of strings (or Expression with resultType
+ * array of strings).
*/
- query?: any;
+ allowedGroups?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * The user scope uri. Type: string (or Expression with resultType string).
*/
- additionalColumns?: AdditionalColumns[];
+ userScopeFilterUri?: any;
+ /**
+   * The Column to apply the startTime and endTime filters. Type: string (or Expression with
+   * resultType string).
+ */
+ dateFilterColumn?: any;
+ /**
+ * Start time of the requested range for this dataset. Type: string (or Expression with
+ * resultType string).
+ */
+ startTime?: any;
+ /**
+ * End time of the requested range for this dataset. Type: string (or Expression with resultType
+ * string).
+ */
+ endTime?: any;
+ /**
+ * The columns to be read out from the Office 365 table. Type: array of objects (or Expression
+ * with resultType array of objects). Example: [ { "name": "Id" }, { "name": "CreatedDateTime" }
+ * ]
+ */
+ outputColumns?: any;
}
/**
- * A copy activity source for Microsoft Access.
+ * Specify the column name and value of additional columns.
*/
-export interface MicrosoftAccessSource {
+export interface AdditionalColumns {
/**
- * Polymorphic Discriminator
+ * Additional column name. Type: string (or Expression with resultType string).
*/
- type: "MicrosoftAccessSource";
+ name?: any;
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * Additional column value. Type: string (or Expression with resultType string).
*/
- sourceRetryCount?: any;
+ value?: any;
+}
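
A sketch of an additionalColumns array; "$$FILEPATH" is assumed here to be a reserved token for the source file path, and the constant column is hypothetical:

const extraColumns: AdditionalColumns[] = [
  { name: "sourceFile", value: "$$FILEPATH" }, // assumed reserved token for the file path
  { name: "loadDate", value: "2020-06-16" }    // hypothetical constant value
];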
+
+/**
+ * Cursor methods for MongoDB query.
+ */
+export interface MongoDbCursorMethodsProperties {
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Specifies the fields to return in the documents that match the query filter. To return all
+ * fields in the matching documents, omit this parameter. Type: string (or Expression with
+ * resultType string).
*/
- sourceRetryWait?: any;
+ project?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+   * Specifies the order in which the query returns matching documents. Type: string (or
+   * Expression with resultType string).
*/
- maxConcurrentConnections?: any;
+ sort?: any;
/**
- * Database query. Type: string (or Expression with resultType string).
+   * Specifies how many documents are skipped and where MongoDB begins returning results. This
+ * approach may be useful in implementing paginated results. Type: integer (or Expression with
+ * resultType integer).
*/
- query?: any;
+ skip?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Specifies the maximum number of documents the server returns. limit() is analogous to the
+ * LIMIT statement in a SQL database. Type: integer (or Expression with resultType integer).
*/
- additionalColumns?: AdditionalColumns[];
+ limit?: any;
+ /**
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
+ */
+ [property: string]: any;
}
/**
- * A copy activity source for various relational databases.
+ * A copy activity source for a CosmosDB (MongoDB API) database.
*/
-export interface RelationalSource {
+export interface CosmosDbMongoDbApiSource {
/**
* Polymorphic Discriminator
*/
- type: "RelationalSource";
+ type: "CosmosDbMongoDbApiSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14423,9 +15171,27 @@ export interface RelationalSource {
*/
maxConcurrentConnections?: any;
/**
- * Database query. Type: string (or Expression with resultType string).
+ * Specifies selection filter using query operators. To return all documents in a collection,
+ * omit this parameter or pass an empty document ({}). Type: string (or Expression with
+ * resultType string).
*/
- query?: any;
+ filter?: any;
+ /**
+   * Cursor methods for MongoDB query.
+ */
+ cursorMethods?: MongoDbCursorMethodsProperties;
+ /**
+   * Specifies the number of documents to return in each batch of the response from the MongoDB
+   * instance. In most cases, modifying the batch size will not affect the user or the application.
+   * This property's main purpose is to avoid hitting the limit on response size. Type: integer
+ * (or Expression with resultType integer).
+ */
+ batchSize?: any;
+ /**
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ queryTimeout?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
@@ -14434,13 +15200,13 @@ export interface RelationalSource {
}
/**
- * A copy activity Common Data Service for Apps source.
+ * A copy activity source for a MongoDB database.
*/
-export interface CommonDataServiceForAppsSource {
+export interface MongoDbV2Source {
/**
* Polymorphic Discriminator
*/
- type: "CommonDataServiceForAppsSource";
+ type: "MongoDbV2Source";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14456,10 +15222,27 @@ export interface CommonDataServiceForAppsSource {
*/
maxConcurrentConnections?: any;
/**
- * FetchXML is a proprietary query language that is used in Microsoft Common Data Service for
- * Apps (online & on-premises). Type: string (or Expression with resultType string).
+ * Specifies selection filter using query operators. To return all documents in a collection,
+ * omit this parameter or pass an empty document ({}). Type: string (or Expression with
+ * resultType string).
*/
- query?: any;
+ filter?: any;
+ /**
+   * Cursor methods for MongoDB query.
+ */
+ cursorMethods?: MongoDbCursorMethodsProperties;
+ /**
+   * Specifies the number of documents to return in each batch of the response from the MongoDB
+   * instance. In most cases, modifying the batch size will not affect the user or the application.
+   * This property's main purpose is to avoid hitting the limit on response size. Type: integer
+ * (or Expression with resultType integer).
+ */
+ batchSize?: any;
+ /**
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ queryTimeout?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
@@ -14468,13 +15251,13 @@ export interface CommonDataServiceForAppsSource {
}
/**
- * A copy activity Dynamics CRM source.
+ * A copy activity source for a MongoDB database.
*/
-export interface DynamicsCrmSource {
+export interface MongoDbSource {
/**
* Polymorphic Discriminator
*/
- type: "DynamicsCrmSource";
+ type: "MongoDbSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14490,8 +15273,8 @@ export interface DynamicsCrmSource {
*/
maxConcurrentConnections?: any;
/**
- * FetchXML is a proprietary query language that is used in Microsoft Dynamics CRM (online &
- * on-premises). Type: string (or Expression with resultType string).
+ * Database query. Should be a SQL-92 query expression. Type: string (or Expression with
+ * resultType string).
*/
query?: any;
/**
@@ -14502,13 +15285,13 @@ export interface DynamicsCrmSource {
}
/**
- * A copy activity Dynamics source.
+ * A copy activity source for a web page table.
*/
-export interface DynamicsSource {
+export interface WebSource {
/**
* Polymorphic Discriminator
*/
- type: "DynamicsSource";
+ type: "WebSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14523,11 +15306,6 @@ export interface DynamicsSource {
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
- /**
- * FetchXML is a proprietary query language that is used in Microsoft Dynamics (online &
- * on-premises). Type: string (or Expression with resultType string).
- */
- query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
@@ -14536,13 +15314,38 @@ export interface DynamicsSource {
}
/**
- * A copy activity Azure CosmosDB (SQL API) Collection source.
+ * The settings that will be leveraged for Oracle source partitioning.
*/
-export interface CosmosDbSqlApiSource {
+export interface OraclePartitionSettings {
+ /**
+ * Names of the physical partitions of Oracle table.
+ */
+ partitionNames?: any;
+ /**
+ * The name of the column in integer type that will be used for proceeding range partitioning.
+ * Type: string (or Expression with resultType string).
+ */
+ partitionColumnName?: any;
+ /**
+ * The maximum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionUpperBound?: any;
+ /**
+ * The minimum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionLowerBound?: any;
+}
+
+/**
+ * A copy activity Oracle source.
+ */
+export interface OracleSource {
/**
* Polymorphic Discriminator
*/
- type: "CosmosDbSqlApiSource";
+ type: "OracleSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14558,17 +15361,23 @@ export interface CosmosDbSqlApiSource {
*/
maxConcurrentConnections?: any;
/**
- * SQL API query. Type: string (or Expression with resultType string).
+ * Oracle reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
+ oracleReaderQuery?: any;
/**
- * Page size of the result. Type: integer (or Expression with resultType integer).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- pageSize?: any;
+ queryTimeout?: any;
/**
- * Preferred regions. Type: array of strings (or Expression with resultType array of strings).
+ * The partition mechanism that will be used for Oracle read in parallel. Possible values
+ * include: 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
*/
- preferredRegions?: any;
+ partitionOption?: OraclePartitionOption;
+ /**
+ * The settings that will be leveraged for Oracle source partitioning.
+ */
+ partitionSettings?: OraclePartitionSettings;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
@@ -14577,13 +15386,13 @@ export interface CosmosDbSqlApiSource {
}
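
A sketch of an Oracle source configured for dynamic-range parallel reads; the query, column, and bounds are placeholders:

const oracleSource: OracleSource = {
  type: "OracleSource",
  oracleReaderQuery: "SELECT * FROM SALES", // placeholder reader query
  partitionOption: "DynamicRange",
  partitionSettings: {
    partitionColumnName: "SALE_ID",         // integer column used for range partitioning
    partitionLowerBound: "1",
    partitionUpperBound: "1000000"
  }
};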
/**
- * A copy activity Document Database Collection source.
+ * A copy activity Azure Data Explorer (Kusto) source.
*/
-export interface DocumentDbCollectionSource {
+export interface AzureDataExplorerSource {
/**
* Polymorphic Discriminator
*/
- type: "DocumentDbCollectionSource";
+ type: "AzureDataExplorerSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14599,16 +15408,18 @@ export interface DocumentDbCollectionSource {
*/
maxConcurrentConnections?: any;
/**
- * Documents query. Type: string (or Expression with resultType string).
+ * Database query. Should be a Kusto Query Language (KQL) query. Type: string (or Expression with
+ * resultType string).
*/
- query?: any;
+ query: any;
/**
- * Nested properties separator. Type: string (or Expression with resultType string).
+ * The name of the Boolean option that controls whether truncation is applied to result-sets that
+ * go beyond a certain row-count limit.
*/
- nestingSeparator?: any;
+ noTruncation?: any;
/**
* Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+   * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
queryTimeout?: any;
/**
@@ -14619,13 +15430,13 @@ export interface DocumentDbCollectionSource {
}
/**
- * A copy activity Azure Blob source.
+ * A copy activity HDFS source.
*/
-export interface BlobSource {
+export interface HdfsSource {
/**
* Polymorphic Discriminator
*/
- type: "BlobSource";
+ type: "HdfsSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14640,54 +15451,25 @@ export interface BlobSource {
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
- /**
- * Treat empty as null. Type: boolean (or Expression with resultType boolean).
- */
- treatEmptyAsNull?: any;
- /**
- * Number of header lines to skip from each blob. Type: integer (or Expression with resultType
- * integer).
- */
- skipHeaderLineCount?: any;
/**
* If true, files under the folder path will be read recursively. Default is true. Type: boolean
* (or Expression with resultType boolean).
*/
recursive?: any;
-}
-
-/**
- * The Amazon S3 settings needed for the interim Amazon S3 when copying from Amazon Redshift with
- * unload. With this, data from Amazon Redshift source will be unloaded into S3 first and then
- * copied into the targeted sink from the interim S3.
- */
-export interface RedshiftUnloadSettings {
- /**
- * The name of the Amazon S3 linked service which will be used for the unload operation when
- * copying from the Amazon Redshift source.
- */
- s3LinkedServiceName: LinkedServiceReference;
/**
- * The bucket of the interim Amazon S3 which will be used to store the unloaded data from Amazon
- * Redshift source. The bucket must be in the same region as the Amazon Redshift source. Type:
- * string (or Expression with resultType string).
+ * Specifies Distcp-related settings.
*/
- bucketName: any;
+ distcpSettings?: DistcpSettings;
}
/**
- * Contains the possible cases for TabularSource.
- */
-export type TabularSourceUnion = TabularSource | AmazonRedshiftSource | GoogleAdWordsSource | OracleServiceCloudSource | DynamicsAXSource | ResponsysSource | SalesforceMarketingCloudSource | VerticaSource | NetezzaSource | ZohoSource | XeroSource | SquareSource | SparkSource | ShopifySource | ServiceNowSource | QuickBooksSource | PrestoSource | PhoenixSource | PaypalSource | MarketoSource | AzureMariaDBSource | MariaDBSource | MagentoSource | JiraSource | ImpalaSource | HubspotSource | HiveSource | HBaseSource | GreenplumSource | GoogleBigQuerySource | EloquaSource | DrillSource | CouchbaseSource | ConcurSource | AzurePostgreSqlSource | AmazonMWSSource | CassandraSource | TeradataSource | AzureMySqlSource | SqlDWSource | SqlMISource | AzureSqlSource | SqlServerSource | SqlSource | SapTableSource | SapOpenHubSource | SapHanaSource | SapEccSource | SapCloudForCustomerSource | SalesforceSource | SapBwSource | SybaseSource | PostgreSqlSource | MySqlSource | OdbcSource | Db2Source | InformixSource | AzureTableSource;
-
-/**
- * Copy activity sources of tabular type.
+ * A copy activity file system source.
*/
-export interface TabularSource {
+export interface FileSystemSource {
/**
* Polymorphic Discriminator
*/
- type: "TabularSource";
+ type: "FileSystemSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14703,10 +15485,10 @@ export interface TabularSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- queryTimeout?: any;
+ recursive?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
@@ -14715,13 +15497,13 @@ export interface TabularSource {
}
/**
- * A copy activity source for Amazon Redshift Source.
+ * A copy activity Rest service source.
*/
-export interface AmazonRedshiftSource {
+export interface RestSource {
/**
* Polymorphic Discriminator
*/
- type: "AmazonRedshiftSource";
+ type: "RestSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14737,35 +15519,50 @@ export interface AmazonRedshiftSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * The HTTP method used to call the RESTful API. The default is GET. Type: string (or Expression
+ * with resultType string).
*/
- queryTimeout?: any;
+ requestMethod?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * The HTTP request body to the RESTful API if requestMethod is POST. Type: string (or Expression
+ * with resultType string).
*/
- additionalColumns?: AdditionalColumns[];
+ requestBody?: any;
/**
- * Database query. Type: string (or Expression with resultType string).
+ * The additional HTTP headers in the request to the RESTful API. Type: string (or Expression
+ * with resultType string).
*/
- query?: any;
+ additionalHeaders?: any;
/**
- * The Amazon S3 settings needed for the interim Amazon S3 when copying from Amazon Redshift with
- * unload. With this, data from Amazon Redshift source will be unloaded into S3 first and then
- * copied into the targeted sink from the interim S3.
+ * The pagination rules to compose next page requests. Type: string (or Expression with
+ * resultType string).
*/
- redshiftUnloadSettings?: RedshiftUnloadSettings;
+ paginationRules?: any;
+ /**
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:01:40. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ httpRequestTimeout?: any;
+ /**
+   * The time to wait before sending the next page request.
+ */
+ requestInterval?: any;
+ /**
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
+ */
+ additionalColumns?: AdditionalColumns[];
}
/**
- * A copy activity Google AdWords service source.
+ * A copy activity Salesforce Service Cloud source.
*/
-export interface GoogleAdWordsSource {
+export interface SalesforceServiceCloudSource {
/**
* Polymorphic Discriminator
*/
- type: "GoogleAdWordsSource";
+ type: "SalesforceServiceCloudSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14781,29 +15578,29 @@ export interface GoogleAdWordsSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Database query. Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
+ /**
+ * The read behavior for the operation. Default is Query. Possible values include: 'Query',
+ * 'QueryAll'
+ */
+ readBehavior?: SalesforceSourceReadBehavior;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Oracle Service Cloud source.
+ * A copy activity source for OData source.
*/
-export interface OracleServiceCloudSource {
+export interface ODataSource {
/**
* Polymorphic Discriminator
*/
- type: "OracleServiceCloudSource";
+ type: "ODataSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14819,29 +15616,30 @@ export interface OracleServiceCloudSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * OData query. For example, "$top=1". Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
+ /**
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:05:00. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ httpRequestTimeout?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Dynamics AX source.
+ * A copy activity source for Microsoft Access.
*/
-export interface DynamicsAXSource {
+export interface MicrosoftAccessSource {
/**
* Polymorphic Discriminator
*/
- type: "DynamicsAXSource";
+ type: "MicrosoftAccessSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14857,29 +15655,24 @@ export interface DynamicsAXSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Database query. Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Responsys source.
+ * A copy activity source for various relational databases.
*/
-export interface ResponsysSource {
+export interface RelationalSource {
/**
* Polymorphic Discriminator
*/
- type: "ResponsysSource";
+ type: "RelationalSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14895,29 +15688,24 @@ export interface ResponsysSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Database query. Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Salesforce Marketing Cloud source.
+ * A copy activity Common Data Service for Apps source.
*/
-export interface SalesforceMarketingCloudSource {
+export interface CommonDataServiceForAppsSource {
/**
* Polymorphic Discriminator
*/
- type: "SalesforceMarketingCloudSource";
+ type: "CommonDataServiceForAppsSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14933,29 +15721,25 @@ export interface SalesforceMarketingCloudSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * FetchXML is a proprietary query language that is used in Microsoft Common Data Service for
+ * Apps (online & on-premises). Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Vertica source.
+ * A copy activity Dynamics CRM source.
*/
-export interface VerticaSource {
+export interface DynamicsCrmSource {
/**
* Polymorphic Discriminator
*/
- type: "VerticaSource";
+ type: "DynamicsCrmSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -14971,50 +15755,25 @@ export interface VerticaSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * FetchXML is a proprietary query language that is used in Microsoft Dynamics CRM (online &
+ * on-premises). Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
-}
-
-/**
- * The settings that will be leveraged for Netezza source partitioning.
- */
-export interface NetezzaPartitionSettings {
- /**
- * The name of the column in integer type that will be used for proceeding range partitioning.
- * Type: string (or Expression with resultType string).
- */
- partitionColumnName?: any;
- /**
- * The maximum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionUpperBound?: any;
- /**
- * The minimum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionLowerBound?: any;
}
/**
- * A copy activity Netezza source.
+ * A copy activity Dynamics source.
*/
-export interface NetezzaSource {
+export interface DynamicsSource {
/**
* Polymorphic Discriminator
*/
- type: "NetezzaSource";
+ type: "DynamicsSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15030,38 +15789,25 @@ export interface NetezzaSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * FetchXML is a proprietary query language that is used in Microsoft Dynamics (online &
+ * on-premises). Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
- /**
- * The partition mechanism that will be used for Netezza read in parallel. Possible values
- * include: 'None', 'DataSlice', 'DynamicRange'
- */
- partitionOption?: NetezzaPartitionOption;
- /**
- * The settings that will be leveraged for Netezza source partitioning.
- */
- partitionSettings?: NetezzaPartitionSettings;
}
/**
- * A copy activity Zoho server source.
+ * A copy activity Azure CosmosDB (SQL API) Collection source.
*/
-export interface ZohoSource {
+export interface CosmosDbSqlApiSource {
/**
* Polymorphic Discriminator
*/
- type: "ZohoSource";
+ type: "CosmosDbSqlApiSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15077,29 +15823,32 @@ export interface ZohoSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * SQL API query. Type: string (or Expression with resultType string).
*/
- queryTimeout?: any;
+ query?: any;
+ /**
+ * Page size of the result. Type: integer (or Expression with resultType integer).
+ */
+ pageSize?: any;
+ /**
+ * Preferred regions. Type: array of strings (or Expression with resultType array of strings).
+ */
+ preferredRegions?: any;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
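Taken together, the new Cosmos DB properties give the source shape below. A minimal sketch, assuming the package's standard `DataFactoryManagementModels` namespace export; the query, page size, and regions are placeholders:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical source payload for a copy activity reading from Cosmos DB (SQL API).
const cosmosSource: Models.CosmosDbSqlApiSource = {
  type: "CosmosDbSqlApiSource", // polymorphic discriminator
  query: "SELECT * FROM c WHERE c.kind = 'order'",
  pageSize: 1000, // result page size
  preferredRegions: ["West US 2", "East US"]
};
```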
/**
- * A copy activity Xero Service source.
+ * A copy activity Document Database Collection source.
*/
-export interface XeroSource {
+export interface DocumentDbCollectionSource {
/**
* Polymorphic Discriminator
*/
- type: "XeroSource";
+ type: "DocumentDbCollectionSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15114,6 +15863,14 @@ export interface XeroSource {
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
+ /**
+ * Documents query. Type: string (or Expression with resultType string).
+ */
+ query?: any;
+ /**
+ * Nested properties separator. Type: string (or Expression with resultType string).
+ */
+ nestingSeparator?: any;
/**
* Query timeout. Type: string (or Expression with resultType string), pattern:
* ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
@@ -15124,20 +15881,16 @@ export interface XeroSource {
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
/**
- * A copy activity Square Service source.
+ * A copy activity Azure Blob source.
*/
-export interface SquareSource {
+export interface BlobSource {
/**
* Polymorphic Discriminator
*/
- type: "SquareSource";
+ type: "BlobSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15153,29 +15906,53 @@ export interface SquareSource {
*/
maxConcurrentConnections?: any;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Treat empty as null. Type: boolean (or Expression with resultType boolean).
*/
- queryTimeout?: any;
+ treatEmptyAsNull?: any;
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * Number of header lines to skip from each blob. Type: integer (or Expression with resultType
+ * integer).
*/
- additionalColumns?: AdditionalColumns[];
+ skipHeaderLineCount?: any;
/**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
+ * If true, files under the folder path will be read recursively. Default is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- query?: any;
+ recursive?: any;
}
/**
- * A copy activity Spark Server source.
+ * The Amazon S3 settings needed for the interim Amazon S3 when copying from Amazon Redshift with
+ * unload. With this, data from the Amazon Redshift source will be unloaded into S3 first and then
+ * copied into the targeted sink from the interim S3.
*/
-export interface SparkSource {
+export interface RedshiftUnloadSettings {
+ /**
+ * The name of the Amazon S3 linked service which will be used for the unload operation when
+ * copying from the Amazon Redshift source.
+ */
+ s3LinkedServiceName: LinkedServiceReference;
+ /**
+ * The bucket of the interim Amazon S3 which will be used to store the unloaded data from the
+ * Amazon Redshift source. The bucket must be in the same region as the Amazon Redshift source. Type:
+ * string (or Expression with resultType string).
+ */
+ bucketName: any;
+}
+
+/**
+ * Contains the possible cases for TabularSource.
+ */
+export type TabularSourceUnion = TabularSource | AmazonRedshiftSource | GoogleAdWordsSource | OracleServiceCloudSource | DynamicsAXSource | ResponsysSource | SalesforceMarketingCloudSource | VerticaSource | NetezzaSource | ZohoSource | XeroSource | SquareSource | SparkSource | ShopifySource | ServiceNowSource | QuickBooksSource | PrestoSource | PhoenixSource | PaypalSource | MarketoSource | AzureMariaDBSource | MariaDBSource | MagentoSource | JiraSource | ImpalaSource | HubspotSource | HiveSource | HBaseSource | GreenplumSource | GoogleBigQuerySource | EloquaSource | DrillSource | CouchbaseSource | ConcurSource | AzurePostgreSqlSource | AmazonMWSSource | CassandraSource | TeradataSource | AzureMySqlSource | SqlDWSource | SqlMISource | AzureSqlSource | SqlServerSource | SqlSource | SapTableSource | SapOpenHubSource | SapHanaSource | SapEccSource | SapCloudForCustomerSource | SalesforceSource | SapBwSource | SybaseSource | PostgreSqlSource | MySqlSource | OdbcSource | Db2Source | InformixSource | AzureTableSource;
+
+/**
+ * Copy activity sources of tabular type.
+ */
+export interface TabularSource {
/**
* Polymorphic Discriminator
*/
- type: "SparkSource";
+ type: "TabularSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15200,20 +15977,16 @@ export interface SparkSource {
* Expression with resultType array of objects).
*/
additionalColumns?: AdditionalColumns[];
- /**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
- */
- query?: any;
}
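Since every member of the new TabularSourceUnion carries a distinct literal `type`, consumers can switch on the discriminator to narrow to a concrete source. A minimal sketch, assuming the models namespace export:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// The `type` property is the polymorphic discriminator, so switching on it
// narrows the union to the concrete source shape.
function describeSource(source: Models.TabularSourceUnion): string {
  switch (source.type) {
    case "AmazonRedshiftSource":
      // Narrowed: redshiftUnloadSettings is visible here.
      return `Redshift, unload bucket: ${source.redshiftUnloadSettings?.bucketName}`;
    case "NetezzaSource":
      return `Netezza, partition option: ${source.partitionOption || "None"}`;
    default:
      return source.type;
  }
}
```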
/**
- * A copy activity Shopify Service source.
+ * A copy activity Amazon Redshift source.
*/
-export interface ShopifySource {
+export interface AmazonRedshiftSource {
/**
* Polymorphic Discriminator
*/
- type: "ShopifySource";
+ type: "AmazonRedshiftSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15239,19 +16012,25 @@ export interface ShopifySource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * A query to retrieve data from source. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
query?: any;
+ /**
+ * The Amazon S3 settings needed for the interim Amazon S3 when copying from Amazon Redshift with
+ * unload. With this, data from the Amazon Redshift source will be unloaded into S3 first and then
+ * copied into the targeted sink from the interim S3.
+ */
+ redshiftUnloadSettings?: RedshiftUnloadSettings;
}
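A minimal sketch of the Redshift-with-unload flow the comment above describes; the linked service name and bucket are placeholders:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Redshift source that stages data in an interim S3 bucket via UNLOAD.
const redshiftSource: Models.AmazonRedshiftSource = {
  type: "AmazonRedshiftSource",
  query: "SELECT * FROM public.sales",
  redshiftUnloadSettings: {
    // Placeholder names; the bucket must be in the same region as the Redshift source.
    s3LinkedServiceName: { type: "LinkedServiceReference", referenceName: "InterimS3" },
    bucketName: "interim-unload-bucket"
  }
};
```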
/**
- * A copy activity ServiceNow server source.
+ * A copy activity Google AdWords service source.
*/
-export interface ServiceNowSource {
+export interface GoogleAdWordsSource {
/**
* Polymorphic Discriminator
*/
- type: "ServiceNowSource";
+ type: "GoogleAdWordsSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15283,13 +16062,13 @@ export interface ServiceNowSource {
}
/**
- * A copy activity QuickBooks server source.
+ * A copy activity Oracle Service Cloud source.
*/
-export interface QuickBooksSource {
+export interface OracleServiceCloudSource {
/**
* Polymorphic Discriminator
*/
- type: "QuickBooksSource";
+ type: "OracleServiceCloudSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15321,13 +16100,13 @@ export interface QuickBooksSource {
}
/**
- * A copy activity Presto server source.
+ * A copy activity Dynamics AX source.
*/
-export interface PrestoSource {
+export interface DynamicsAXSource {
/**
* Polymorphic Discriminator
*/
- type: "PrestoSource";
+ type: "DynamicsAXSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15356,16 +16135,22 @@ export interface PrestoSource {
* A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
+ /**
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:05:00. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ httpRequestTimeout?: any;
}
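The new httpRequestTimeout takes a TimeSpan-patterned string. A minimal sketch with an illustrative OData query:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Dynamics AX source; httpRequestTimeout matches the documented
// TimeSpan pattern and covers getting the response, not reading its body.
const axSource: Models.DynamicsAXSource = {
  type: "DynamicsAXSource",
  query: "$top=100",
  httpRequestTimeout: "00:05:00"
};
```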
/**
- * A copy activity Phoenix server source.
+ * A copy activity Responsys source.
*/
-export interface PhoenixSource {
+export interface ResponsysSource {
/**
* Polymorphic Discriminator
*/
- type: "PhoenixSource";
+ type: "ResponsysSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15397,13 +16182,13 @@ export interface PhoenixSource {
}
/**
- * A copy activity Paypal Service source.
+ * A copy activity Salesforce Marketing Cloud source.
*/
-export interface PaypalSource {
+export interface SalesforceMarketingCloudSource {
/**
* Polymorphic Discriminator
*/
- type: "PaypalSource";
+ type: "SalesforceMarketingCloudSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15435,13 +16220,13 @@ export interface PaypalSource {
}
/**
- * A copy activity Marketo server source.
+ * A copy activity Vertica source.
*/
-export interface MarketoSource {
+export interface VerticaSource {
/**
* Polymorphic Discriminator
*/
- type: "MarketoSource";
+ type: "VerticaSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15473,13 +16258,34 @@ export interface MarketoSource {
}
/**
- * A copy activity Azure MariaDB source.
+ * The settings that will be leveraged for Netezza source partitioning.
*/
-export interface AzureMariaDBSource {
+export interface NetezzaPartitionSettings {
+ /**
+ * The name of the column in integer type that will be used for proceeding range partitioning.
+ * Type: string (or Expression with resultType string).
+ */
+ partitionColumnName?: any;
+ /**
+ * The maximum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionUpperBound?: any;
+ /**
+ * The minimum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionLowerBound?: any;
+}
+
+/**
+ * A copy activity Netezza source.
+ */
+export interface NetezzaSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureMariaDBSource";
+ type: "NetezzaSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15508,16 +16314,25 @@ export interface AzureMariaDBSource {
* A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
+ /**
+ * The partition mechanism that will be used for Netezza read in parallel. Possible values
+ * include: 'None', 'DataSlice', 'DynamicRange'
+ */
+ partitionOption?: NetezzaPartitionOption;
+ /**
+ * The settings that will be leveraged for Netezza source partitioning.
+ */
+ partitionSettings?: NetezzaPartitionSettings;
}
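NetezzaSource now supports parallel reads; a minimal sketch of a dynamic-range configuration with placeholder column and bounds:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Netezza source read in parallel over a dynamic range.
const netezzaSource: Models.NetezzaSource = {
  type: "NetezzaSource",
  query: "SELECT * FROM inventory",
  partitionOption: "DynamicRange",
  partitionSettings: {
    partitionColumnName: "item_id", // integer column driving range partitioning
    partitionLowerBound: "1",
    partitionUpperBound: "1000000"
  }
};
```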
/**
- * A copy activity MariaDB server source.
+ * A copy activity Zoho server source.
*/
-export interface MariaDBSource {
+export interface ZohoSource {
/**
* Polymorphic Discriminator
*/
- type: "MariaDBSource";
+ type: "ZohoSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15549,13 +16364,13 @@ export interface MariaDBSource {
}
/**
- * A copy activity Magento server source.
+ * A copy activity Xero Service source.
*/
-export interface MagentoSource {
+export interface XeroSource {
/**
* Polymorphic Discriminator
*/
- type: "MagentoSource";
+ type: "XeroSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15587,13 +16402,13 @@ export interface MagentoSource {
}
/**
- * A copy activity Jira Service source.
+ * A copy activity Square Service source.
*/
-export interface JiraSource {
+export interface SquareSource {
/**
* Polymorphic Discriminator
*/
- type: "JiraSource";
+ type: "SquareSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15625,13 +16440,13 @@ export interface JiraSource {
}
/**
- * A copy activity Impala server source.
+ * A copy activity Spark Server source.
*/
-export interface ImpalaSource {
+export interface SparkSource {
/**
* Polymorphic Discriminator
*/
- type: "ImpalaSource";
+ type: "SparkSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15663,13 +16478,13 @@ export interface ImpalaSource {
}
/**
- * A copy activity Hubspot Service source.
+ * A copy activity Shopify Service source.
*/
-export interface HubspotSource {
+export interface ShopifySource {
/**
* Polymorphic Discriminator
*/
- type: "HubspotSource";
+ type: "ShopifySource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15701,13 +16516,13 @@ export interface HubspotSource {
}
/**
- * A copy activity Hive Server source.
+ * A copy activity ServiceNow server source.
*/
-export interface HiveSource {
+export interface ServiceNowSource {
/**
* Polymorphic Discriminator
*/
- type: "HiveSource";
+ type: "ServiceNowSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15739,13 +16554,13 @@ export interface HiveSource {
}
/**
- * A copy activity HBase server source.
+ * A copy activity QuickBooks server source.
*/
-export interface HBaseSource {
+export interface QuickBooksSource {
/**
* Polymorphic Discriminator
*/
- type: "HBaseSource";
+ type: "QuickBooksSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15777,13 +16592,13 @@ export interface HBaseSource {
}
/**
- * A copy activity Greenplum Database source.
+ * A copy activity Presto server source.
*/
-export interface GreenplumSource {
+export interface PrestoSource {
/**
* Polymorphic Discriminator
*/
- type: "GreenplumSource";
+ type: "PrestoSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15815,13 +16630,13 @@ export interface GreenplumSource {
}
/**
- * A copy activity Google BigQuery service source.
+ * A copy activity Phoenix server source.
*/
-export interface GoogleBigQuerySource {
+export interface PhoenixSource {
/**
* Polymorphic Discriminator
*/
- type: "GoogleBigQuerySource";
+ type: "PhoenixSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15853,13 +16668,13 @@ export interface GoogleBigQuerySource {
}
/**
- * A copy activity Eloqua server source.
+ * A copy activity Paypal Service source.
*/
-export interface EloquaSource {
+export interface PaypalSource {
/**
* Polymorphic Discriminator
*/
- type: "EloquaSource";
+ type: "PaypalSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15891,13 +16706,13 @@ export interface EloquaSource {
}
/**
- * A copy activity Drill server source.
+ * A copy activity Marketo server source.
*/
-export interface DrillSource {
+export interface MarketoSource {
/**
* Polymorphic Discriminator
*/
- type: "DrillSource";
+ type: "MarketoSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15929,13 +16744,13 @@ export interface DrillSource {
}
/**
- * A copy activity Couchbase server source.
+ * A copy activity Azure MariaDB source.
*/
-export interface CouchbaseSource {
+export interface AzureMariaDBSource {
/**
* Polymorphic Discriminator
*/
- type: "CouchbaseSource";
+ type: "AzureMariaDBSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -15967,13 +16782,13 @@ export interface CouchbaseSource {
}
/**
- * A copy activity Concur Service source.
+ * A copy activity MariaDB server source.
*/
-export interface ConcurSource {
+export interface MariaDBSource {
/**
* Polymorphic Discriminator
*/
- type: "ConcurSource";
+ type: "MariaDBSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16005,13 +16820,13 @@ export interface ConcurSource {
}
/**
- * A copy activity Azure PostgreSQL source.
+ * A copy activity Magento server source.
*/
-export interface AzurePostgreSqlSource {
+export interface MagentoSource {
/**
* Polymorphic Discriminator
*/
- type: "AzurePostgreSqlSource";
+ type: "MagentoSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16043,13 +16858,13 @@ export interface AzurePostgreSqlSource {
}
/**
- * A copy activity Amazon Marketplace Web Service source.
+ * A copy activity Jira Service source.
*/
-export interface AmazonMWSSource {
+export interface JiraSource {
/**
* Polymorphic Discriminator
*/
- type: "AmazonMWSSource";
+ type: "JiraSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16081,13 +16896,13 @@ export interface AmazonMWSSource {
}
/**
- * A copy activity source for a Cassandra database.
+ * A copy activity Impala server source.
*/
-export interface CassandraSource {
+export interface ImpalaSource {
/**
* Polymorphic Discriminator
*/
- type: "CassandraSource";
+ type: "ImpalaSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16113,50 +16928,19 @@ export interface CassandraSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Should be a SQL-92 query expression or Cassandra Query Language (CQL) command.
- * Type: string (or Expression with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
- /**
- * The consistency level specifies how many Cassandra servers must respond to a read request
- * before returning data to the client application. Cassandra checks the specified number of
- * Cassandra servers for data to satisfy the read request. Must be one of
- * cassandraSourceReadConsistencyLevels. The default value is 'ONE'. It is case-insensitive.
- * Possible values include: 'ALL', 'EACH_QUORUM', 'QUORUM', 'LOCAL_QUORUM', 'ONE', 'TWO',
- * 'THREE', 'LOCAL_ONE', 'SERIAL', 'LOCAL_SERIAL'
- */
- consistencyLevel?: CassandraSourceReadConsistencyLevels;
-}
-
-/**
- * The settings that will be leveraged for teradata source partitioning.
- */
-export interface TeradataPartitionSettings {
- /**
- * The name of the column that will be used for proceeding range or hash partitioning. Type:
- * string (or Expression with resultType string).
- */
- partitionColumnName?: any;
- /**
- * The maximum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionUpperBound?: any;
- /**
- * The minimum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionLowerBound?: any;
}
/**
- * A copy activity Teradata source.
+ * A copy activity Hubspot Service source.
*/
-export interface TeradataSource {
+export interface HubspotSource {
/**
* Polymorphic Discriminator
*/
- type: "TeradataSource";
+ type: "HubspotSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16182,28 +16966,19 @@ export interface TeradataSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Teradata query. Type: string (or Expression with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
- /**
- * The partition mechanism that will be used for teradata read in parallel. Possible values
- * include: 'None', 'Hash', 'DynamicRange'
- */
- partitionOption?: TeradataPartitionOption;
- /**
- * The settings that will be leveraged for teradata source partitioning.
- */
- partitionSettings?: TeradataPartitionSettings;
}
/**
- * A copy activity Azure MySQL source.
+ * A copy activity Hive Server source.
*/
-export interface AzureMySqlSource {
+export interface HiveSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureMySqlSource";
+ type: "HiveSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16229,19 +17004,19 @@ export interface AzureMySqlSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
}
/**
- * A copy activity SQL Data Warehouse source.
+ * A copy activity HBase server source.
*/
-export interface SqlDWSource {
+export interface HBaseSource {
/**
* Polymorphic Discriminator
*/
- type: "SqlDWSource";
+ type: "HBaseSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16267,45 +17042,19 @@ export interface SqlDWSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SQL Data Warehouse reader query. Type: string (or Expression with resultType string).
- */
- sqlReaderQuery?: any;
- /**
- * Name of the stored procedure for a SQL Data Warehouse source. This cannot be used at the same
- * time as SqlReaderQuery. Type: string (or Expression with resultType string).
- */
- sqlReaderStoredProcedureName?: any;
- /**
- * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
- * type: "int"}}". Type: object (or Expression with resultType object), itemType:
- * StoredProcedureParameter.
- */
- storedProcedureParameters?: any;
-}
-
-/**
- * SQL stored procedure parameter.
- */
-export interface StoredProcedureParameter {
- /**
- * Stored procedure parameter value. Type: string (or Expression with resultType string).
- */
- value?: any;
- /**
- * Stored procedure parameter type. Possible values include: 'String', 'Int', 'Int64', 'Decimal',
- * 'Guid', 'Boolean', 'Date'
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- type?: StoredProcedureParameterType;
+ query?: any;
}
/**
- * A copy activity Azure SQL Managed Instance source.
+ * A copy activity Greenplum Database source.
*/
-export interface SqlMISource {
+export interface GreenplumSource {
/**
* Polymorphic Discriminator
*/
- type: "SqlMISource";
+ type: "GreenplumSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16331,33 +17080,19 @@ export interface SqlMISource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SQL reader query. Type: string (or Expression with resultType string).
- */
- sqlReaderQuery?: any;
- /**
- * Name of the stored procedure for a Azure SQL Managed Instance source. This cannot be used at
- * the same time as SqlReaderQuery. Type: string (or Expression with resultType string).
- */
- sqlReaderStoredProcedureName?: any;
- /**
- * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
- * type: "int"}}".
- */
- storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
- /**
- * Which additional types to produce.
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- produceAdditionalTypes?: any;
+ query?: any;
}
/**
- * A copy activity Azure SQL source.
+ * A copy activity Google BigQuery service source.
*/
-export interface AzureSqlSource {
+export interface GoogleBigQuerySource {
/**
* Polymorphic Discriminator
*/
- type: "AzureSqlSource";
+ type: "GoogleBigQuerySource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16383,33 +17118,19 @@ export interface AzureSqlSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SQL reader query. Type: string (or Expression with resultType string).
- */
- sqlReaderQuery?: any;
- /**
- * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
- * as SqlReaderQuery. Type: string (or Expression with resultType string).
- */
- sqlReaderStoredProcedureName?: any;
- /**
- * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
- * type: "int"}}".
- */
- storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
- /**
- * Which additional types to produce.
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- produceAdditionalTypes?: any;
+ query?: any;
}
/**
- * A copy activity SQL server source.
+ * A copy activity Eloqua server source.
*/
-export interface SqlServerSource {
+export interface EloquaSource {
/**
* Polymorphic Discriminator
*/
- type: "SqlServerSource";
+ type: "EloquaSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16435,33 +17156,19 @@ export interface SqlServerSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SQL reader query. Type: string (or Expression with resultType string).
- */
- sqlReaderQuery?: any;
- /**
- * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
- * as SqlReaderQuery. Type: string (or Expression with resultType string).
- */
- sqlReaderStoredProcedureName?: any;
- /**
- * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
- * type: "int"}}".
- */
- storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
- /**
- * Which additional types to produce.
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- produceAdditionalTypes?: any;
+ query?: any;
}
/**
- * A copy activity SQL source.
+ * A copy activity Drill server source.
*/
-export interface SqlSource {
+export interface DrillSource {
/**
* Polymorphic Discriminator
*/
- type: "SqlSource";
+ type: "DrillSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16487,61 +17194,19 @@ export interface SqlSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SQL reader query. Type: string (or Expression with resultType string).
- */
- sqlReaderQuery?: any;
- /**
- * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
- * as SqlReaderQuery. Type: string (or Expression with resultType string).
- */
- sqlReaderStoredProcedureName?: any;
- /**
- * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
- * type: "int"}}".
- */
- storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
- /**
- * Specifies the transaction locking behavior for the SQL source. Allowed values:
- * ReadCommitted/ReadUncommitted/RepeatableRead/Serializable/Snapshot. The default value is
- * ReadCommitted. Type: string (or Expression with resultType string).
- */
- isolationLevel?: any;
-}
-
-/**
- * The settings that will be leveraged for SAP table source partitioning.
- */
-export interface SapTablePartitionSettings {
- /**
- * The name of the column that will be used for proceeding range partitioning. Type: string (or
- * Expression with resultType string).
- */
- partitionColumnName?: any;
- /**
- * The maximum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionUpperBound?: any;
- /**
- * The minimum value of column specified in partitionColumnName that will be used for proceeding
- * range partitioning. Type: string (or Expression with resultType string).
- */
- partitionLowerBound?: any;
- /**
- * The maximum value of partitions the table will be split into. Type: integer (or Expression
- * with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- maxPartitionsNumber?: any;
+ query?: any;
}
/**
- * A copy activity source for SAP Table source.
+ * A copy activity Couchbase server source.
*/
-export interface SapTableSource {
+export interface CouchbaseSource {
/**
* Polymorphic Discriminator
*/
- type: "SapTableSource";
+ type: "CouchbaseSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16567,54 +17232,57 @@ export interface SapTableSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * The number of rows to be retrieved. Type: integer(or Expression with resultType integer).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- rowCount?: any;
+ query?: any;
+}
+
+/**
+ * A copy activity Concur Service source.
+ */
+export interface ConcurSource {
/**
- * The number of rows that will be skipped. Type: integer (or Expression with resultType
- * integer).
+ * Polymorphic Discriminator
*/
- rowSkips?: any;
+ type: "ConcurSource";
/**
- * The fields of the SAP table that will be retrieved. For example, column0, column1. Type:
- * string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- rfcTableFields?: any;
+ sourceRetryCount?: any;
/**
- * The options for the filtering of the SAP Table. For example, COLUMN0 EQ SOME VALUE. Type:
- * string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- rfcTableOptions?: any;
+ sourceRetryWait?: any;
/**
- * Specifies the maximum number of rows that will be retrieved at a time when retrieving data
- * from SAP Table. Type: integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- batchSize?: any;
+ maxConcurrentConnections?: any;
/**
- * Specifies the custom RFC function module that will be used to read data from SAP Table. Type:
- * string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- customRfcReadTableFunctionModule?: any;
+ queryTimeout?: any;
/**
- * The partition mechanism that will be used for SAP table read in parallel. Possible values
- * include: 'None', 'PartitionOnInt', 'PartitionOnCalendarYear', 'PartitionOnCalendarMonth',
- * 'PartitionOnCalendarDate', 'PartitionOnTime'
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- partitionOption?: SapTablePartitionOption;
+ additionalColumns?: AdditionalColumns[];
/**
- * The settings that will be leveraged for SAP table source partitioning.
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- partitionSettings?: SapTablePartitionSettings;
+ query?: any;
}
/**
- * A copy activity source for SAP Business Warehouse Open Hub Destination source.
+ * A copy activity Azure PostgreSQL source.
*/
-export interface SapOpenHubSource {
+export interface AzurePostgreSqlSource {
/**
* Polymorphic Discriminator
*/
- type: "SapOpenHubSource";
+ type: "AzurePostgreSqlSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16640,37 +17308,19 @@ export interface SapOpenHubSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Whether to exclude the records of the last request. The default value is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- excludeLastRequest?: any;
- /**
- * The ID of request for delta loading. Once it is set, only data with requestId larger than the
- * value of this property will be retrieved. The default value is 0. Type: integer (or Expression
- * with resultType integer ).
- */
- baseRequestId?: any;
-}
-
-/**
- * The settings that will be leveraged for SAP HANA source partitioning.
- */
-export interface SapHanaPartitionSettings {
- /**
- * The name of the column that will be used for proceeding range partitioning. Type: string (or
- * Expression with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
- partitionColumnName?: any;
+ query?: any;
}
/**
- * A copy activity source for SAP HANA source.
+ * A copy activity Amazon Marketplace Web Service source.
*/
-export interface SapHanaSource {
+export interface AmazonMWSSource {
/**
* Polymorphic Discriminator
*/
- type: "SapHanaSource";
+ type: "AmazonMWSSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16696,33 +17346,19 @@ export interface SapHanaSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SAP HANA Sql query. Type: string (or Expression with resultType string).
+ * A query to retrieve data from source. Type: string (or Expression with resultType string).
*/
query?: any;
- /**
- * The packet size of data read from SAP HANA. Type: integer(or Expression with resultType
- * integer).
- */
- packetSize?: any;
- /**
- * The partition mechanism that will be used for SAP HANA read in parallel. Possible values
- * include: 'None', 'PhysicalPartitionsOfTable', 'SapHanaDynamicRange'
- */
- partitionOption?: SapHanaPartitionOption;
- /**
- * The settings that will be leveraged for SAP HANA source partitioning.
- */
- partitionSettings?: SapHanaPartitionSettings;
}
/**
- * A copy activity source for SAP ECC source.
+ * A copy activity source for a Cassandra database.
*/
-export interface SapEccSource {
+export interface CassandraSource {
/**
* Polymorphic Discriminator
*/
- type: "SapEccSource";
+ type: "CassandraSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16748,20 +17384,50 @@ export interface SapEccSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SAP ECC OData query. For example, "$top=1". Type: string (or Expression with resultType
- * string).
+ * Database query. Should be a SQL-92 query expression or Cassandra Query Language (CQL) command.
+ * Type: string (or Expression with resultType string).
*/
query?: any;
+ /**
+ * The consistency level specifies how many Cassandra servers must respond to a read request
+ * before returning data to the client application. Cassandra checks the specified number of
+ * Cassandra servers for data to satisfy the read request. Must be one of
+ * cassandraSourceReadConsistencyLevels. The default value is 'ONE'. It is case-insensitive.
+ * Possible values include: 'ALL', 'EACH_QUORUM', 'QUORUM', 'LOCAL_QUORUM', 'ONE', 'TWO',
+ * 'THREE', 'LOCAL_ONE', 'SERIAL', 'LOCAL_SERIAL'
+ */
+ consistencyLevel?: CassandraSourceReadConsistencyLevels;
}
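A minimal sketch of a Cassandra read at quorum consistency; the keyspace and table are placeholders:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Cassandra source; consistencyLevel controls how many replicas
// must answer a read before data is returned (default 'ONE').
const cassandraSource: Models.CassandraSource = {
  type: "CassandraSource",
  query: "SELECT * FROM ks.events",
  consistencyLevel: "LOCAL_QUORUM"
};
```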
/**
- * A copy activity source for SAP Cloud for Customer source.
+ * The settings that will be leveraged for teradata source partitioning.
*/
-export interface SapCloudForCustomerSource {
+export interface TeradataPartitionSettings {
+ /**
+ * The name of the column that will be used for proceeding range or hash partitioning. Type:
+ * string (or Expression with resultType string).
+ */
+ partitionColumnName?: any;
+ /**
+ * The maximum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionUpperBound?: any;
+ /**
+ * The minimum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionLowerBound?: any;
+}
+
+/**
+ * A copy activity Teradata source.
+ */
+export interface TeradataSource {
/**
* Polymorphic Discriminator
*/
- type: "SapCloudForCustomerSource";
+ type: "TeradataSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16787,20 +17453,28 @@ export interface SapCloudForCustomerSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * SAP Cloud for Customer OData query. For example, "$top=1". Type: string (or Expression with
- * resultType string).
+ * Teradata query. Type: string (or Expression with resultType string).
*/
query?: any;
+ /**
+ * The partition mechanism that will be used for teradata read in parallel. Possible values
+ * include: 'None', 'Hash', 'DynamicRange'
+ */
+ partitionOption?: TeradataPartitionOption;
+ /**
+ * The settings that will be leveraged for teradata source partitioning.
+ */
+ partitionSettings?: TeradataPartitionSettings;
}
/**
- * A copy activity Salesforce source.
+ * A copy activity Azure MySQL source.
*/
-export interface SalesforceSource {
+export interface AzureMySqlSource {
/**
* Polymorphic Discriminator
*/
- type: "SalesforceSource";
+ type: "AzureMySqlSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16829,21 +17503,42 @@ export interface SalesforceSource {
* Database query. Type: string (or Expression with resultType string).
*/
query?: any;
+}
+
+/**
+ * The settings that will be leveraged for Sql source partitioning.
+ */
+export interface SqlPartitionSettings {
/**
- * The read behavior for the operation. Default is Query. Possible values include: 'Query',
- * 'QueryAll'
+ * The name of the column in integer or datetime type that will be used for proceeding
+ * partitioning. If not specified, the primary key of the table is auto-detected and used as the
+ * partition column. Type: string (or Expression with resultType string).
*/
- readBehavior?: SalesforceSourceReadBehavior;
+ partitionColumnName?: any;
+ /**
+ * The maximum value of the partition column for partition range splitting. This value is used to
+ * decide the partition stride, not for filtering the rows in the table. All rows in the table or
+ * query result will be partitioned and copied. Type: string (or Expression with resultType
+ * string).
+ */
+ partitionUpperBound?: any;
+ /**
+ * The minimum value of the partition column for partition range splitting. This value is used to
+ * decide the partition stride, not for filtering the rows in the table. All rows in the table or
+ * query result will be partitioned and copied. Type: string (or Expression with resultType
+ * string).
+ */
+ partitionLowerBound?: any;
}
/**
- * A copy activity source for SapBW server via MDX.
+ * A copy activity SQL Data Warehouse source.
*/
-export interface SapBwSource {
+export interface SqlDWSource {
/**
* Polymorphic Discriminator
*/
- type: "SapBwSource";
+ type: "SqlDWSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16869,19 +17564,54 @@ export interface SapBwSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * MDX query. Type: string (or Expression with resultType string).
+ * SQL Data Warehouse reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
+ sqlReaderQuery?: any;
+ /**
+ * Name of the stored procedure for a SQL Data Warehouse source. This cannot be used at the same
+ * time as SqlReaderQuery. Type: string (or Expression with resultType string).
+ */
+ sqlReaderStoredProcedureName?: any;
+ /**
+ * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
+ * type: "int"}}". Type: object (or Expression with resultType object), itemType:
+ * StoredProcedureParameter.
+ */
+ storedProcedureParameters?: any;
+ /**
+ * The partition mechanism that will be used for Sql read in parallel. Possible values include:
+ * 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
+ */
+ partitionOption?: SqlPartitionOption;
+ /**
+ * The settings that will be leveraged for Sql source partitioning.
+ */
+ partitionSettings?: SqlPartitionSettings;
}
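A minimal sketch of the new SQL parallel-read surface on SqlDWSource, using placeholder bounds (the bounds set the partition stride and never filter rows):

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical SQL Data Warehouse source using a dynamic-range parallel read.
const dwSource: Models.SqlDWSource = {
  type: "SqlDWSource",
  sqlReaderQuery: "SELECT * FROM dbo.FactSales",
  partitionOption: "DynamicRange",
  partitionSettings: {
    partitionColumnName: "SaleId",
    partitionLowerBound: "1",
    partitionUpperBound: "5000000"
  }
};
```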
/**
- * A copy activity source for Sybase databases.
+ * SQL stored procedure parameter.
*/
-export interface SybaseSource {
+export interface StoredProcedureParameter {
+ /**
+ * Stored procedure parameter value. Type: string (or Expression with resultType string).
+ */
+ value?: any;
+ /**
+ * Stored procedure parameter type. Possible values include: 'String', 'Int', 'Int64', 'Decimal',
+ * 'Guid', 'Boolean', 'Date'
+ */
+ type?: StoredProcedureParameterType;
+}
+
+/**
+ * A copy activity Azure SQL Managed Instance source.
+ */
+export interface SqlMISource {
/**
* Polymorphic Discriminator
*/
- type: "SybaseSource";
+ type: "SqlMISource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16907,19 +17637,42 @@ export interface SybaseSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * SQL reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
+ sqlReaderQuery?: any;
+ /**
+ * Name of the stored procedure for an Azure SQL Managed Instance source. This cannot be used at
+ * the same time as SqlReaderQuery. Type: string (or Expression with resultType string).
+ */
+ sqlReaderStoredProcedureName?: any;
+ /**
+ * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
+ * type: "int"}}".
+ */
+ storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
+ /**
+ * Which additional types to produce.
+ */
+ produceAdditionalTypes?: any;
+ /**
+ * The partition mechanism that will be used for Sql read in parallel. Possible values include:
+ * 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
+ */
+ partitionOption?: SqlPartitionOption;
+ /**
+ * The settings that will be leveraged for Sql source partitioning.
+ */
+ partitionSettings?: SqlPartitionSettings;
}
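A minimal sketch of a stored-procedure-driven Managed Instance read using the StoredProcedureParameter map; the procedure and parameter names are placeholders:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Managed Instance source driven by a stored procedure.
const miSource: Models.SqlMISource = {
  type: "SqlMISource",
  sqlReaderStoredProcedureName: "dbo.uspGetOrders",
  storedProcedureParameters: {
    Region: { value: "EMEA", type: "String" } // StoredProcedureParameter shape
  },
  partitionOption: "PhysicalPartitionsOfTable"
};
```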
/**
- * A copy activity source for PostgreSQL databases.
+ * A copy activity Azure SQL source.
*/
-export interface PostgreSqlSource {
+export interface AzureSqlSource {
/**
* Polymorphic Discriminator
*/
- type: "PostgreSqlSource";
+ type: "AzureSqlSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16945,19 +17698,42 @@ export interface PostgreSqlSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * SQL reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
+ sqlReaderQuery?: any;
+ /**
+ * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
+ * as SqlReaderQuery. Type: string (or Expression with resultType string).
+ */
+ sqlReaderStoredProcedureName?: any;
+ /**
+ * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
+ * type: "int"}}".
+ */
+ storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
+ /**
+ * Which additional types to produce.
+ */
+ produceAdditionalTypes?: any;
+ /**
+ * The partition mechanism that will be used for Sql read in parallel. Possible values include:
+ * 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
+ */
+ partitionOption?: SqlPartitionOption;
+ /**
+ * The settings that will be leveraged for Sql source partitioning.
+ */
+ partitionSettings?: SqlPartitionSettings;
}
/**
- * A copy activity source for MySQL databases.
+ * A copy activity SQL server source.
*/
-export interface MySqlSource {
+export interface SqlServerSource {
/**
* Polymorphic Discriminator
*/
- type: "MySqlSource";
+ type: "SqlServerSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -16983,19 +17759,42 @@ export interface MySqlSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * SQL reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
+ sqlReaderQuery?: any;
+ /**
+ * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
+ * as SqlReaderQuery. Type: string (or Expression with resultType string).
+ */
+ sqlReaderStoredProcedureName?: any;
+ /**
+ * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
+ * type: "int"}}".
+ */
+ storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
+ /**
+ * Which additional types to produce.
+ */
+ produceAdditionalTypes?: any;
+ /**
+ * The partition mechanism that will be used for Sql read in parallel. Possible values include:
+ * 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
+ */
+ partitionOption?: SqlPartitionOption;
+ /**
+ * The settings that will be leveraged for Sql source partitioning.
+ */
+ partitionSettings?: SqlPartitionSettings;
}
/**
- * A copy activity source for ODBC databases.
+ * A copy activity SQL source.
*/
-export interface OdbcSource {
+export interface SqlSource {
/**
* Polymorphic Discriminator
*/
- type: "OdbcSource";
+ type: "SqlSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -17021,57 +17820,70 @@ export interface OdbcSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * SQL reader query. Type: string (or Expression with resultType string).
*/
- query?: any;
-}
-
-/**
- * A copy activity source for Db2 databases.
- */
-export interface Db2Source {
+ sqlReaderQuery?: any;
/**
- * Polymorphic Discriminator
+ * Name of the stored procedure for a SQL Database source. This cannot be used at the same time
+ * as SqlReaderQuery. Type: string (or Expression with resultType string).
*/
- type: "Db2Source";
+ sqlReaderStoredProcedureName?: any;
/**
- * Source retry count. Type: integer (or Expression with resultType integer).
+ * Value and type setting for stored procedure parameters. Example: "{Parameter1: {value: "1",
+ * type: "int"}}".
*/
- sourceRetryCount?: any;
+ storedProcedureParameters?: { [propertyName: string]: StoredProcedureParameter };
/**
- * Source retry wait. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * Specifies the transaction locking behavior for the SQL source. Allowed values:
+ * ReadCommitted/ReadUncommitted/RepeatableRead/Serializable/Snapshot. The default value is
+ * ReadCommitted. Type: string (or Expression with resultType string).
*/
- sourceRetryWait?: any;
+ isolationLevel?: any;
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * The partition mechanism that will be used for Sql read in parallel. Possible values include:
+ * 'None', 'PhysicalPartitionsOfTable', 'DynamicRange'
*/
- maxConcurrentConnections?: any;
+ partitionOption?: SqlPartitionOption;
/**
- * Query timeout. Type: string (or Expression with resultType string), pattern:
- * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ * The settings that will be leveraged for Sql source partitioning.
*/
- queryTimeout?: any;
+ partitionSettings?: SqlPartitionSettings;
+}
+
+/**
+ * The settings that will be leveraged for SAP table source partitioning.
+ */
+export interface SapTablePartitionSettings {
/**
- * Specifies the additional columns to be added to source data. Type: array of objects (or
- * Expression with resultType array of objects).
+ * The name of the column that will be used for proceeding range partitioning. Type: string (or
+ * Expression with resultType string).
*/
- additionalColumns?: AdditionalColumns[];
+ partitionColumnName?: any;
/**
- * Database query. Type: string (or Expression with resultType string).
+ * The maximum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
*/
- query?: any;
+ partitionUpperBound?: any;
+ /**
+ * The minimum value of column specified in partitionColumnName that will be used for proceeding
+ * range partitioning. Type: string (or Expression with resultType string).
+ */
+ partitionLowerBound?: any;
+ /**
+ * The maximum number of partitions the table will be split into. Type: integer (or Expression
+ * with resultType integer).
+ */
+ maxPartitionsNumber?: any;
}
/**
- * A copy activity source for Informix.
+ * A copy activity SAP Table source.
*/
-export interface InformixSource {
+export interface SapTableSource {
/**
* Polymorphic Discriminator
*/
- type: "InformixSource";
+ type: "SapTableSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -17097,19 +17909,54 @@ export interface InformixSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Database query. Type: string (or Expression with resultType string).
+ * The number of rows to be retrieved. Type: integer (or Expression with resultType integer).
+ */
+ rowCount?: any;
+ /**
+ * The number of rows that will be skipped. Type: integer (or Expression with resultType
+ * integer).
+ */
+ rowSkips?: any;
+ /**
+ * The fields of the SAP table that will be retrieved. For example, column0, column1. Type:
+ * string (or Expression with resultType string).
+ */
+ rfcTableFields?: any;
+ /**
+ * The options for the filtering of the SAP Table. For example, COLUMN0 EQ SOME VALUE. Type:
+ * string (or Expression with resultType string).
+ */
+ rfcTableOptions?: any;
+ /**
+ * Specifies the maximum number of rows that will be retrieved at a time when retrieving data
+ * from SAP Table. Type: integer (or Expression with resultType integer).
+ */
+ batchSize?: any;
+ /**
+ * Specifies the custom RFC function module that will be used to read data from SAP Table. Type:
+ * string (or Expression with resultType string).
+ */
+ customRfcReadTableFunctionModule?: any;
+ /**
+ * The partition mechanism that will be used for SAP table read in parallel. Possible values
+ * include: 'None', 'PartitionOnInt', 'PartitionOnCalendarYear', 'PartitionOnCalendarMonth',
+ * 'PartitionOnCalendarDate', 'PartitionOnTime'
+ */
+ partitionOption?: SapTablePartitionOption;
+ /**
+ * The settings that will be leveraged for SAP table source partitioning.
*/
- query?: any;
+ partitionSettings?: SapTablePartitionSettings;
}
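A minimal sketch of an SAP table read split by calendar month; the table fields, filter, and bounds are placeholders:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical SAP table read, filtered via RFC options and split by calendar month.
const sapTableSource: Models.SapTableSource = {
  type: "SapTableSource",
  rfcTableFields: "MATNR, WERKS",
  rfcTableOptions: "WERKS EQ '1000'",
  batchSize: 10000,
  partitionOption: "PartitionOnCalendarMonth",
  partitionSettings: {
    partitionColumnName: "BUDAT", // posting-date column, placeholder
    partitionLowerBound: "20190101",
    partitionUpperBound: "20191231",
    maxPartitionsNumber: 12
  }
};
```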
/**
- * A copy activity Azure Table source.
+ * A copy activity SAP Business Warehouse Open Hub Destination source.
*/
-export interface AzureTableSource {
+export interface SapOpenHubSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureTableSource";
+ type: "SapOpenHubSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -17135,528 +17982,521 @@ export interface AzureTableSource {
*/
additionalColumns?: AdditionalColumns[];
/**
- * Azure Table source query. Type: string (or Expression with resultType string).
+ * Whether to exclude the records of the last request. The default value is true. Type: boolean
+ * (or Expression with resultType boolean).
*/
- azureTableSourceQuery?: any;
+ excludeLastRequest?: any;
/**
- * Azure Table source ignore table not found. Type: boolean (or Expression with resultType
- * boolean).
+ * The ID of the request for delta loading. Once it is set, only data with a requestId larger
+ * than the value of this property will be retrieved. The default value is 0. Type: integer (or
+ * Expression with resultType integer).
*/
- azureTableSourceIgnoreTableNotFound?: any;
+ baseRequestId?: any;
}
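A minimal sketch of an Open Hub delta load using the two properties above; the request id is a placeholder:

```typescript
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Hypothetical Open Hub delta load: read only requests newer than id 40 and
// exclude the records of the last (possibly still-running) request.
const openHubSource: Models.SapOpenHubSource = {
  type: "SapOpenHubSource",
  excludeLastRequest: true,
  baseRequestId: 40
};
```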
/**
- * Contains the possible cases for StoreReadSettings.
- */
-export type StoreReadSettingsUnion = StoreReadSettings | HdfsReadSettings | HttpReadSettings | SftpReadSettings | FtpReadSettings | GoogleCloudStorageReadSettings | AzureFileStorageReadSettings | FileServerReadSettings | AmazonS3ReadSettings | AzureDataLakeStoreReadSettings | AzureBlobFSReadSettings | AzureBlobStorageReadSettings;
-
-/**
- * Connector read setting.
+ * The settings that will be leveraged for SAP HANA source partitioning.
*/
-export interface StoreReadSettings {
- /**
- * Polymorphic Discriminator
- */
- type: "StoreReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
+export interface SapHanaPartitionSettings {
/**
- * Describes unknown properties. The value of an unknown property can be of "any" type.
+ * The name of the column that will be used for proceeding range partitioning. Type: string (or
+ * Expression with resultType string).
*/
- [property: string]: any;
+ partitionColumnName?: any;
}
/**
- * HDFS read settings.
+ * A copy activity SAP HANA source.
*/
-export interface HdfsReadSettings {
+export interface SapHanaSource {
/**
* Polymorphic Discriminator
*/
- type: "HdfsReadSettings";
+ type: "SapHanaSource";
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- maxConcurrentConnections?: any;
+ sourceRetryCount?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- recursive?: any;
+ sourceRetryWait?: any;
/**
- * HDFS wildcardFolderPath. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ maxConcurrentConnections?: any;
/**
- * HDFS wildcardFileName. Type: string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ queryTimeout?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- fileListPath?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * Indicates whether to enable partition discovery.
+ * SAP HANA Sql query. Type: string (or Expression with resultType string).
*/
- enablePartitionDiscovery?: boolean;
+ query?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * The packet size of data read from SAP HANA. Type: integer (or Expression with resultType
+ * integer).
*/
- modifiedDatetimeStart?: any;
+ packetSize?: any;
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+   * The partition mechanism that will be used to read SAP HANA in parallel. Possible values
+   * include: 'None', 'PhysicalPartitionsOfTable', 'SapHanaDynamicRange'
*/
- modifiedDatetimeEnd?: any;
+ partitionOption?: SapHanaPartitionOption;
/**
- * Specifies Distcp-related settings.
+   * The settings that will be used for SAP HANA source partitioning.
*/
- distcpSettings?: DistcpSettings;
+ partitionSettings?: SapHanaPartitionSettings;
}
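
A minimal sketch of the new SAP HANA parallel-read options, assuming the package's usual `DataFactoryManagementModels` re-export; the table and column names are illustrative:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Read a SAP HANA table in parallel, range-partitioned on a numeric column.
const sapHanaSource: Models.SapHanaSource = {
  type: "SapHanaSource",
  query: "SELECT * FROM MYSCHEMA.ORDERS",          // illustrative query
  partitionOption: "SapHanaDynamicRange",
  partitionSettings: { partitionColumnName: "ORDER_ID" }
};
```
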
/**
- * Sftp read settings.
+ * A copy activity source for SAP ECC.
*/
-export interface HttpReadSettings {
+export interface SapEccSource {
/**
* Polymorphic Discriminator
*/
- type: "HttpReadSettings";
+ type: "SapEccSource";
+ /**
+ * Source retry count. Type: integer (or Expression with resultType integer).
+ */
+ sourceRetryCount?: any;
+ /**
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sourceRetryWait?: any;
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
/**
- * The HTTP method used to call the RESTful API. The default is GET. Type: string (or Expression
- * with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- requestMethod?: any;
+ queryTimeout?: any;
/**
- * The HTTP request body to the RESTful API if requestMethod is POST. Type: string (or Expression
- * with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- requestBody?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The additional HTTP headers in the request to the RESTful API. Type: string (or Expression
- * with resultType string).
+ * SAP ECC OData query. For example, "$top=1". Type: string (or Expression with resultType
+ * string).
*/
- additionalHeaders?: any;
+ query?: any;
/**
- * Specifies the timeout for a HTTP client to get HTTP response from HTTP server.
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:05:00. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- requestTimeout?: any;
+ httpRequestTimeout?: any;
}
/**
- * Sftp read settings.
+ * A copy activity source for SAP Cloud for Customer.
*/
-export interface SftpReadSettings {
+export interface SapCloudForCustomerSource {
/**
* Polymorphic Discriminator
*/
- type: "SftpReadSettings";
+ type: "SapCloudForCustomerSource";
/**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- maxConcurrentConnections?: any;
+ sourceRetryCount?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- recursive?: any;
+ sourceRetryWait?: any;
/**
- * Sftp wildcardFolderPath. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Sftp wildcardFileName. Type: string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ queryTimeout?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- fileListPath?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * SAP Cloud for Customer OData query. For example, "$top=1". Type: string (or Expression with
+ * resultType string).
*/
- modifiedDatetimeStart?: any;
+ query?: any;
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:05:00. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- modifiedDatetimeEnd?: any;
+ httpRequestTimeout?: any;
}
/**
- * Ftp read settings.
+ * A copy activity Salesforce source.
*/
-export interface FtpReadSettings {
+export interface SalesforceSource {
/**
* Polymorphic Discriminator
*/
- type: "FtpReadSettings";
+ type: "SalesforceSource";
+ /**
+ * Source retry count. Type: integer (or Expression with resultType integer).
+ */
+ sourceRetryCount?: any;
+ /**
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sourceRetryWait?: any;
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
- /**
- * Ftp wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFolderPath?: any;
+ queryTimeout?: any;
/**
- * Ftp wildcardFileName. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- wildcardFileName?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- fileListPath?: any;
+ query?: any;
/**
- * Specify whether to use binary transfer mode for FTP stores.
+ * The read behavior for the operation. Default is Query. Possible values include: 'Query',
+ * 'QueryAll'
*/
- useBinaryTransfer?: boolean;
+ readBehavior?: SalesforceSourceReadBehavior;
}
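
A sketch of the Salesforce source using the `QueryAll` read behavior, which also returns deleted and archived records; the SOQL text is illustrative:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// With 'QueryAll', soft-deleted and archived rows are included in the copy;
// the default 'Query' behavior skips them.
const salesforceSource: Models.SalesforceSource = {
  type: "SalesforceSource",
  query: "SELECT Id, Name FROM Account",           // illustrative SOQL
  readBehavior: "QueryAll"
};
```
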
/**
- * Google Cloud Storage read settings.
+ * A copy activity source for SAP BW via MDX.
*/
-export interface GoogleCloudStorageReadSettings {
+export interface SapBwSource {
/**
* Polymorphic Discriminator
*/
- type: "GoogleCloudStorageReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
- /**
- * Google Cloud Storage wildcardFolderPath. Type: string (or Expression with resultType string).
- */
- wildcardFolderPath?: any;
+ type: "SapBwSource";
/**
- * Google Cloud Storage wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFileName?: any;
+ sourceRetryCount?: any;
/**
- * The prefix filter for the Google Cloud Storage object name. Type: string (or Expression with
- * resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- prefix?: any;
+ sourceRetryWait?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- fileListPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ queryTimeout?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- modifiedDatetimeStart?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * MDX query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeEnd?: any;
+ query?: any;
}
/**
- * Azure File Storage read settings.
+ * A copy activity source for Sybase databases.
*/
-export interface AzureFileStorageReadSettings {
+export interface SybaseSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureFileStorageReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
+ type: "SybaseSource";
/**
- * Azure File Storage wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ sourceRetryCount?: any;
/**
- * Azure File Storage wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ sourceRetryWait?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- fileListPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ queryTimeout?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- modifiedDatetimeStart?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeEnd?: any;
+ query?: any;
}
/**
- * File server read settings.
+ * A copy activity source for PostgreSQL databases.
*/
-export interface FileServerReadSettings {
+export interface PostgreSqlSource {
/**
* Polymorphic Discriminator
*/
- type: "FileServerReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
+ type: "PostgreSqlSource";
/**
- * FileServer wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ sourceRetryCount?: any;
/**
- * FileServer wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ sourceRetryWait?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- fileListPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ queryTimeout?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- modifiedDatetimeStart?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeEnd?: any;
+ query?: any;
}
/**
- * Azure data lake store read settings.
+ * A copy activity source for MySQL databases.
*/
-export interface AmazonS3ReadSettings {
+export interface MySqlSource {
/**
* Polymorphic Discriminator
*/
- type: "AmazonS3ReadSettings";
+ type: "MySqlSource";
+ /**
+ * Source retry count. Type: integer (or Expression with resultType integer).
+ */
+ sourceRetryCount?: any;
+ /**
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sourceRetryWait?: any;
/**
* The maximum concurrent connection count for the source data store. Type: integer (or
* Expression with resultType integer).
*/
maxConcurrentConnections?: any;
/**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- recursive?: any;
+ queryTimeout?: any;
/**
- * AmazonS3 wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- wildcardFolderPath?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * AmazonS3 wildcardFileName. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- wildcardFileName?: any;
+ query?: any;
+}
+
+/**
+ * A copy activity source for ODBC databases.
+ */
+export interface OdbcSource {
/**
- * The prefix filter for the S3 object name. Type: string (or Expression with resultType string).
+ * Polymorphic Discriminator
*/
- prefix?: any;
+ type: "OdbcSource";
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- fileListPath?: any;
+ sourceRetryCount?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ sourceRetryWait?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- modifiedDatetimeStart?: any;
+ maxConcurrentConnections?: any;
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- modifiedDatetimeEnd?: any;
+ queryTimeout?: any;
+ /**
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
+ */
+ additionalColumns?: AdditionalColumns[];
+ /**
+ * Database query. Type: string (or Expression with resultType string).
+ */
+ query?: any;
}
/**
- * Azure data lake store read settings.
+ * A copy activity source for Db2 databases.
*/
-export interface AzureDataLakeStoreReadSettings {
+export interface Db2Source {
/**
* Polymorphic Discriminator
*/
- type: "AzureDataLakeStoreReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
+ type: "Db2Source";
/**
- * ADLS wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ sourceRetryCount?: any;
/**
- * ADLS wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ sourceRetryWait?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- fileListPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ queryTimeout?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- modifiedDatetimeStart?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeEnd?: any;
+ query?: any;
}
/**
- * Azure blobFS read settings.
+ * A copy activity source for Informix.
*/
-export interface AzureBlobFSReadSettings {
+export interface InformixSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureBlobFSReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
+ type: "InformixSource";
/**
- * Azure blobFS wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ sourceRetryCount?: any;
/**
- * Azure blobFS wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ sourceRetryWait?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- fileListPath?: any;
+ maxConcurrentConnections?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- enablePartitionDiscovery?: boolean;
+ queryTimeout?: any;
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- modifiedDatetimeStart?: any;
+ additionalColumns?: AdditionalColumns[];
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Database query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeEnd?: any;
+ query?: any;
}
/**
- * Azure blob read settings.
+ * A copy activity Azure Table source.
*/
-export interface AzureBlobStorageReadSettings {
+export interface AzureTableSource {
/**
* Polymorphic Discriminator
*/
- type: "AzureBlobStorageReadSettings";
- /**
- * The maximum concurrent connection count for the source data store. Type: integer (or
- * Expression with resultType integer).
- */
- maxConcurrentConnections?: any;
- /**
- * If true, files under the folder path will be read recursively. Default is true. Type: boolean
- * (or Expression with resultType boolean).
- */
- recursive?: any;
+ type: "AzureTableSource";
/**
- * Azure blob wildcardFolderPath. Type: string (or Expression with resultType string).
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- wildcardFolderPath?: any;
+ sourceRetryCount?: any;
/**
- * Azure blob wildcardFileName. Type: string (or Expression with resultType string).
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- wildcardFileName?: any;
+ sourceRetryWait?: any;
/**
- * The prefix filter for the Azure Blob name. Type: string (or Expression with resultType
- * string).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- prefix?: any;
+ maxConcurrentConnections?: any;
/**
- * Point to a text file that lists each file (relative path to the path configured in the
- * dataset) that you want to copy. Type: string (or Expression with resultType string).
+ * Query timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- fileListPath?: any;
+ queryTimeout?: any;
/**
- * Indicates whether to enable partition discovery.
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
*/
- enablePartitionDiscovery?: boolean;
+ additionalColumns?: AdditionalColumns[];
/**
- * The start of file's modified datetime. Type: string (or Expression with resultType string).
+ * Azure Table source query. Type: string (or Expression with resultType string).
*/
- modifiedDatetimeStart?: any;
+ azureTableSourceQuery?: any;
/**
- * The end of file's modified datetime. Type: string (or Expression with resultType string).
+ * Azure Table source ignore table not found. Type: boolean (or Expression with resultType
+ * boolean).
*/
- modifiedDatetimeEnd?: any;
+ azureTableSourceIgnoreTableNotFound?: any;
}
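
A sketch of the Azure Table source showing its two prefixed properties; the filter string is illustrative:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Filter rows by partition key and tolerate a missing table instead of failing.
const azureTableSource: Models.AzureTableSource = {
  type: "AzureTableSource",
  azureTableSourceQuery: "PartitionKey eq 'orders-2020'",
  azureTableSourceIgnoreTableNotFound: true
};
```
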
/**
@@ -17685,6 +18525,10 @@ export interface BinarySource {
* Binary store settings.
*/
storeSettings?: StoreReadSettingsUnion;
+ /**
+ * Binary format settings.
+ */
+ formatSettings?: BinaryReadSettings;
}
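
A sketch of a binary source carrying the new `formatSettings` alongside its store settings; blob read settings are chosen here purely for illustration:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// The store settings pick the files; BinaryReadSettings governs how the
// payload is interpreted (e.g. compression, via its optional properties).
const binarySource: Models.BinarySource = {
  type: "BinarySource",
  storeSettings: { type: "AzureBlobStorageReadSettings", recursive: true },
  formatSettings: { type: "BinaryReadSettings" }
};
```
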
/**
@@ -17721,13 +18565,13 @@ export interface OrcSource {
}
/**
- * A copy activity Json source.
+ * A copy activity Xml source.
*/
-export interface JsonSource {
+export interface XmlSource {
/**
* Polymorphic Discriminator
*/
- type: "JsonSource";
+ type: "XmlSource";
/**
* Source retry count. Type: integer (or Expression with resultType integer).
*/
@@ -17743,9 +18587,13 @@ export interface JsonSource {
*/
maxConcurrentConnections?: any;
/**
- * Json store settings.
+ * Xml store settings.
*/
storeSettings?: StoreReadSettingsUnion;
+ /**
+ * Xml format settings.
+ */
+ formatSettings?: XmlReadSettings;
/**
* Specifies the additional columns to be added to source data. Type: array of objects (or
* Expression with resultType array of objects).
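
A sketch of the new XML source; as with the other file-based sources, `storeSettings` selects the files while `formatSettings` controls parsing:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// XML files read recursively from blob storage with default parse settings.
const xmlSource: Models.XmlSource = {
  type: "XmlSource",
  storeSettings: { type: "AzureBlobStorageReadSettings", recursive: true },
  formatSettings: { type: "XmlReadSettings" }
};
```
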
@@ -17754,37 +18602,40 @@ export interface JsonSource {
}
/**
- * Contains the possible cases for FormatReadSettings.
- */
-export type FormatReadSettingsUnion = FormatReadSettings | DelimitedTextReadSettings;
-
-/**
- * Format read settings.
+ * A copy activity Json source.
*/
-export interface FormatReadSettings {
+export interface JsonSource {
/**
* Polymorphic Discriminator
*/
- type: "FormatReadSettings";
+ type: "JsonSource";
/**
- * Describes unknown properties. The value of an unknown property can be of "any" type.
+ * Source retry count. Type: integer (or Expression with resultType integer).
*/
- [property: string]: any;
-}
-
-/**
- * Delimited text read settings.
- */
-export interface DelimitedTextReadSettings {
+ sourceRetryCount?: any;
/**
- * Polymorphic Discriminator
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
*/
- type: "DelimitedTextReadSettings";
+ sourceRetryWait?: any;
/**
- * Indicates the number of non-empty rows to skip when reading data from input files. Type:
- * integer (or Expression with resultType integer).
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
*/
- skipLineCount?: any;
+ maxConcurrentConnections?: any;
+ /**
+ * Json store settings.
+ */
+ storeSettings?: StoreReadSettingsUnion;
+ /**
+ * Json format settings.
+ */
+ formatSettings?: JsonReadSettings;
+ /**
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
+ */
+ additionalColumns?: AdditionalColumns[];
}
/**
@@ -17857,6 +18708,39 @@ export interface ParquetSource {
additionalColumns?: AdditionalColumns[];
}
+/**
+ * A copy activity Excel source.
+ */
+export interface ExcelSource {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "ExcelSource";
+ /**
+ * Source retry count. Type: integer (or Expression with resultType integer).
+ */
+ sourceRetryCount?: any;
+ /**
+ * Source retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sourceRetryWait?: any;
+ /**
+ * The maximum concurrent connection count for the source data store. Type: integer (or
+ * Expression with resultType integer).
+ */
+ maxConcurrentConnections?: any;
+ /**
+ * Excel store settings.
+ */
+ storeSettings?: StoreReadSettingsUnion;
+ /**
+ * Specifies the additional columns to be added to source data. Type: array of objects (or
+ * Expression with resultType array of objects).
+ */
+ additionalColumns?: AdditionalColumns[];
+}
+
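
A sketch of the new Excel source; it takes only store settings here, since parsing options such as the sheet name belong to the corresponding `ExcelDataset`:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Excel workbooks located via store settings; sheet and range selection are
// configured on the ExcelDataset, not on the source.
const excelSource: Models.ExcelSource = {
  type: "ExcelSource",
  storeSettings: { type: "AzureBlobStorageReadSettings", recursive: true }
};
```
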
/**
* A copy activity Avro source.
*/
@@ -18054,6 +18938,10 @@ export interface DeleteActivity {
* Delete activity dataset reference.
*/
dataset: DatasetReference;
+ /**
+ * Delete activity store settings.
+ */
+ storeSettings?: StoreReadSettingsUnion;
}
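
A sketch of a delete activity using the new `storeSettings`, here to restrict deletion to files matching a wildcard; all names are illustrative:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Delete only *.tmp files resolved through the referenced dataset's store.
const deleteActivity: Models.DeleteActivity = {
  name: "CleanupStagedFiles",
  type: "Delete",
  dataset: { referenceName: "StagingBlobDataset", type: "DatasetReference" },
  storeSettings: {
    type: "AzureBlobStorageReadSettings",
    wildcardFileName: "*.tmp"
  }
};
```
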
/**
@@ -18286,7 +19174,8 @@ export interface SSISPackageLocation {
*/
packagePath?: any;
/**
- * The type of SSIS package location. Possible values include: 'SSISDB', 'File', 'InlinePackage'
+ * The type of SSIS package location. Possible values include: 'SSISDB', 'File', 'InlinePackage',
+ * 'PackageStore'
*/
type?: SsisPackageLocationType;
/**
@@ -18302,6 +19191,10 @@ export interface SSISPackageLocation {
* string).
*/
configurationPath?: any;
+ /**
+ * The configuration file access credential.
+ */
+ configurationAccessCredential?: SSISAccessCredential;
/**
* The package name.
*/
@@ -18813,7 +19706,7 @@ export interface StagingSettings {
/**
* Contains the possible cases for CopySink.
*/
-export type CopySinkUnion = CopySink | CosmosDbMongoDbApiSink | SalesforceServiceCloudSink | SalesforceSink | AzureDataExplorerSink | CommonDataServiceForAppsSink | DynamicsCrmSink | DynamicsSink | MicrosoftAccessSink | InformixSink | OdbcSink | AzureSearchIndexSink | AzureBlobFSSink | AzureDataLakeStoreSink | OracleSink | SqlDWSink | SqlMISink | AzureSqlSink | SqlServerSink | SqlSink | CosmosDbSqlApiSink | DocumentDbCollectionSink | FileSystemSink | BlobSink | BinarySink | ParquetSink | AvroSink | AzureTableSink | AzureQueueSink | SapCloudForCustomerSink | AzureMySqlSink | AzurePostgreSqlSink | OrcSink | JsonSink | DelimitedTextSink;
+export type CopySinkUnion = CopySink | CosmosDbMongoDbApiSink | SalesforceServiceCloudSink | SalesforceSink | AzureDataExplorerSink | CommonDataServiceForAppsSink | DynamicsCrmSink | DynamicsSink | MicrosoftAccessSink | InformixSink | OdbcSink | AzureSearchIndexSink | AzureBlobFSSink | AzureDataLakeStoreSink | OracleSink | SnowflakeSink | SqlDWSink | SqlMISink | AzureSqlSink | SqlServerSink | SqlSink | CosmosDbSqlApiSink | DocumentDbCollectionSink | FileSystemSink | BlobSink | BinarySink | ParquetSink | AvroSink | AzureTableSink | AzureQueueSink | SapCloudForCustomerSink | AzureMySqlSink | AzurePostgreSqlSink | OrcSink | JsonSink | DelimitedTextSink;
/**
* A copy activity sink.
@@ -19438,6 +20331,88 @@ export interface OracleSink {
preCopyScript?: any;
}
+/**
+ * Contains the possible cases for ImportSettings.
+ */
+export type ImportSettingsUnion = ImportSettings | SnowflakeImportCopyCommand;
+
+/**
+ * Import command settings.
+ */
+export interface ImportSettings {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "ImportSettings";
+ /**
+ * Describes unknown properties. The value of an unknown property can be of "any" type.
+ */
+ [property: string]: any;
+}
+
+/**
+ * Snowflake import command settings.
+ */
+export interface SnowflakeImportCopyCommand {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "SnowflakeImportCopyCommand";
+ /**
+   * Additional copy options passed directly to the Snowflake COPY command. Type: key-value pairs
+ * (value should be string type) (or Expression with resultType object). Example:
+ * "additionalCopyOptions": { "DATE_FORMAT": "MM/DD/YYYY", "TIME_FORMAT": "'HH24:MI:SS.FF'" }
+ */
+ additionalCopyOptions?: { [propertyName: string]: any };
+ /**
+   * Additional format options passed directly to the Snowflake COPY command. Type: key-value pairs
+ * (value should be string type) (or Expression with resultType object). Example:
+ * "additionalFormatOptions": { "FORCE": "TRUE", "LOAD_UNCERTAIN_FILES": "'FALSE'" }
+ */
+ additionalFormatOptions?: { [propertyName: string]: any };
+}
+
+/**
+ * A copy activity Snowflake sink.
+ */
+export interface SnowflakeSink {
+ /**
+ * Polymorphic Discriminator
+ */
+ type: "SnowflakeSink";
+ /**
+ * Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
+ */
+ writeBatchSize?: any;
+ /**
+ * Write batch timeout. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ writeBatchTimeout?: any;
+ /**
+ * Sink retry count. Type: integer (or Expression with resultType integer).
+ */
+ sinkRetryCount?: any;
+ /**
+ * Sink retry wait. Type: string (or Expression with resultType string), pattern:
+ * ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ sinkRetryWait?: any;
+ /**
+ * The maximum concurrent connection count for the sink data store. Type: integer (or Expression
+ * with resultType integer).
+ */
+ maxConcurrentConnections?: any;
+ /**
+ * SQL pre-copy script. Type: string (or Expression with resultType string).
+ */
+ preCopyScript?: any;
+ /**
+ * Snowflake import settings.
+ */
+ importSettings?: SnowflakeImportCopyCommand;
+}
+
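
A sketch combining the new Snowflake sink with its import settings; the COPY options shown are illustrative pass-through values:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Stage-and-load into Snowflake: the options are forwarded verbatim to the
// COPY command that performs the final load.
const snowflakeSink: Models.SnowflakeSink = {
  type: "SnowflakeSink",
  preCopyScript: "TRUNCATE TABLE STAGE.ORDERS",    // illustrative script
  importSettings: {
    type: "SnowflakeImportCopyCommand",
    additionalCopyOptions: { ON_ERROR: "CONTINUE" },
    additionalFormatOptions: { DATE_FORMAT: "MM/DD/YYYY" }
  }
};
```
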
/**
* Default value.
*/
@@ -20418,6 +21393,12 @@ export interface SapCloudForCustomerSink {
* 'Update'
*/
writeBehavior?: SapCloudForCustomerSinkWriteBehavior;
+ /**
+ * The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the
+ * timeout to read response data. Default value: 00:05:00. Type: string (or Expression with
+ * resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ */
+ httpRequestTimeout?: any;
}
/**
@@ -21032,7 +22013,7 @@ export interface WaitActivity {
/**
* Duration in seconds.
*/
- waitTimeInSeconds: number;
+ waitTimeInSeconds: any;
}
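
The `waitTimeInSeconds` widening from `number` to `any` lets the duration be an expression as well as a literal; a sketch of both forms:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// A literal duration still type-checks...
const fixedWait: Models.WaitActivity = {
  name: "WaitFixed",
  type: "Wait",
  waitTimeInSeconds: 30
};

// ...and the duration can now come from a pipeline parameter, using the
// service's expression-object convention.
const dynamicWait: Models.WaitActivity = {
  name: "WaitDynamic",
  type: "Wait",
  waitTimeInSeconds: {
    value: "@pipeline().parameters.waitSeconds",
    type: "Expression"
  }
};
```
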
/**
@@ -21648,6 +22629,35 @@ export interface SelfHostedIntegrationRuntime {
linkedInfo?: LinkedIntegrationRuntimeTypeUnion;
}
+/**
+ * The entity reference.
+ */
+export interface EntityReference {
+ /**
+ * The type of this referenced entity. Possible values include: 'IntegrationRuntimeReference',
+ * 'LinkedServiceReference'
+ */
+ type?: IntegrationRuntimeEntityReferenceType;
+ /**
+ * The name of this referenced entity.
+ */
+ referenceName?: string;
+}
+
+/**
+ * Package store for the SSIS integration runtime.
+ */
+export interface PackageStore {
+ /**
+   * The name of the package store.
+ */
+ name: string;
+ /**
+ * The package store linked service reference.
+ */
+ packageStoreLinkedService: EntityReference;
+}
+
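
A sketch of a package store pointing at a linked service; the names are illustrative:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// The store itself is just a name plus a reference to the linked service
// that hosts the SSIS packages.
const packageStore: Models.PackageStore = {
  name: "MyPackageStore",
  packageStoreLinkedService: {
    type: "LinkedServiceReference",
    referenceName: "MyFileShareLinkedService"
  }
};
```
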
/**
* Contains the possible cases for CustomSetupBase.
*/
@@ -21721,21 +22731,6 @@ export interface CmdkeySetup {
password: SecretBaseUnion;
}
-/**
- * The entity reference.
- */
-export interface EntityReference {
- /**
- * The type of this referenced entity. Possible values include: 'IntegrationRuntimeReference',
- * 'LinkedServiceReference'
- */
- type?: IntegrationRuntimeEntityReferenceType;
- /**
- * The name of this referenced entity.
- */
- referenceName?: string;
-}
-
/**
* Data proxy properties for a managed dedicated integration runtime.
*/
@@ -21826,6 +22821,10 @@ export interface IntegrationRuntimeSsisProperties {
* Custom setup without script properties for a SSIS integration runtime.
*/
expressCustomSetupProperties?: CustomSetupBaseUnion[];
+ /**
+ * Package stores for the SSIS Integration Runtime.
+ */
+ packageStores?: PackageStore[];
/**
* Describes unknown properties. The value of an unknown property can be of "any" type.
*/
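
And a sketch of registering such stores on the SSIS properties of an integration runtime, again assuming the `DataFactoryManagementModels` re-export:

```ts
import { DataFactoryManagementModels as Models } from "@azure/arm-datafactory";

// Multiple package stores can be attached; the runtime resolves packages
// from them at execution time.
const ssisProperties: Models.IntegrationRuntimeSsisProperties = {
  packageStores: [
    {
      name: "MyPackageStore",
      packageStoreLinkedService: {
        type: "LinkedServiceReference",
        referenceName: "MyFileShareLinkedService"
      }
    }
  ]
};
```
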
@@ -22704,6 +23703,14 @@ export interface QueryDataFlowDebugSessionsResponse extends Array