From fa8ecdc7e227cfc6f20f975ff771ec6c2885058a Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Fri, 27 Oct 2017 14:24:51 -0700 Subject: [PATCH] Repository Migration (#2) --- dlp/.eslintrc.yml | 3 + dlp/deid.js | 132 ++++++----- dlp/inspect.js | 355 +++++++++++++++++------------ dlp/metadata.js | 62 ++--- dlp/package.json | 53 +---- dlp/quickstart.js | 20 +- dlp/redact.js | 81 ++++--- dlp/risk.js | 246 ++++++++++++-------- dlp/system-test/.eslintrc.yml | 5 + dlp/system-test/deid.test.js | 37 ++- dlp/system-test/inspect.test.js | 231 +++++++++++++------ dlp/system-test/metadata.test.js | 4 +- dlp/system-test/quickstart.test.js | 2 +- dlp/system-test/redact.test.js | 82 +++++-- dlp/system-test/risk.test.js | 77 +++++-- 15 files changed, 837 insertions(+), 553 deletions(-) create mode 100644 dlp/.eslintrc.yml create mode 100644 dlp/system-test/.eslintrc.yml diff --git a/dlp/.eslintrc.yml b/dlp/.eslintrc.yml new file mode 100644 index 0000000000..282535f55f --- /dev/null +++ b/dlp/.eslintrc.yml @@ -0,0 +1,3 @@ +--- +rules: + no-console: off diff --git a/dlp/deid.js b/dlp/deid.js index 31e7083664..94aacdd518 100644 --- a/dlp/deid.js +++ b/dlp/deid.js @@ -15,7 +15,7 @@ 'use strict'; -function deidentifyWithMask (string, maskingCharacter, numberToMask) { +function deidentifyWithMask(string, maskingCharacter, numberToMask) { // [START deidentify_masking] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -34,36 +34,39 @@ function deidentifyWithMask (string, maskingCharacter, numberToMask) { // const maskingCharacter = 'x'; // Construct deidentification request - const items = [{ type: 'text/plain', value: string }]; + const items = [{type: 'text/plain', value: string}]; const request = { deidentifyConfig: { infoTypeTransformations: { - transformations: [{ - primitiveTransformation: { - characterMaskConfig: { - maskingCharacter: maskingCharacter, - numberToMask: numberToMask - } - } - }] - } + transformations: [ + { + primitiveTransformation: { + characterMaskConfig: { + maskingCharacter: maskingCharacter, + numberToMask: numberToMask, + }, + }, + }, + ], + }, }, - items: items + items: items, }; // Run deidentification request - dlp.deidentifyContent(request) - .then((response) => { + dlp + .deidentifyContent(request) + .then(response => { const deidentifiedItems = response[0].items; console.log(deidentifiedItems[0].value); }) - .catch((err) => { + .catch(err => { console.log(`Error in deidentifyWithMask: ${err.message || err}`); }); // [END deidentify_masking] } -function deidentifyWithFpe (string, alphabet, keyName, wrappedKey) { +function deidentifyWithFpe(string, alphabet, keyName, wrappedKey) { // [START deidentify_fpe] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -86,35 +89,38 @@ function deidentifyWithFpe (string, alphabet, keyName, wrappedKey) { // const wrappedKey = 'YOUR_ENCRYPTED_AES_256_KEY' // Construct deidentification request - const items = [{ type: 'text/plain', value: string }]; + const items = [{type: 'text/plain', value: string}]; const request = { deidentifyConfig: { infoTypeTransformations: { - transformations: [{ - primitiveTransformation: { - cryptoReplaceFfxFpeConfig: { - cryptoKey: { - kmsWrapped: { - wrappedKey: wrappedKey, - cryptoKeyName: keyName - } + transformations: [ + { + primitiveTransformation: { + cryptoReplaceFfxFpeConfig: { + cryptoKey: { + kmsWrapped: { + wrappedKey: wrappedKey, + cryptoKeyName: keyName, + }, + }, + commonAlphabet: alphabet, }, - 
commonAlphabet: alphabet - } - } - }] - } + }, + }, + ], + }, }, - items: items + items: items, }; // Run deidentification request - dlp.deidentifyContent(request) - .then((response) => { + dlp + .deidentifyContent(request) + .then(response => { const deidentifiedItems = response[0].items; console.log(deidentifiedItems[0].value); }) - .catch((err) => { + .catch(err => { console.log(`Error in deidentifyWithFpe: ${err.message || err}`); }); // [END deidentify_fpe] @@ -125,35 +131,49 @@ const cli = require(`yargs`) .command( `mask `, `Deidentify sensitive data by masking it with a character.`, - { - maskingCharacter: { - type: 'string', - alias: 'c', - default: '' + { + maskingCharacter: { + type: 'string', + alias: 'c', + default: '', + }, + numberToMask: { + type: 'number', + alias: 'n', + default: 0, + }, }, - numberToMask: { - type: 'number', - alias: 'n', - default: 0 - } - }, - (opts) => deidentifyWithMask(opts.string, opts.maskingCharacter, opts.numberToMask) + opts => + deidentifyWithMask(opts.string, opts.maskingCharacter, opts.numberToMask) ) .command( `fpe `, `Deidentify sensitive data using Format Preserving Encryption (FPE).`, - { - alphabet: { - type: 'string', - alias: 'a', - default: 'ALPHA_NUMERIC', - choices: ['NUMERIC', 'HEXADECIMAL', 'UPPER_CASE_ALPHA_NUMERIC', 'ALPHA_NUMERIC'] - } - }, - (opts) => deidentifyWithFpe(opts.string, opts.alphabet, opts.keyName, opts.wrappedKey) + { + alphabet: { + type: 'string', + alias: 'a', + default: 'ALPHA_NUMERIC', + choices: [ + 'NUMERIC', + 'HEXADECIMAL', + 'UPPER_CASE_ALPHA_NUMERIC', + 'ALPHA_NUMERIC', + ], + }, + }, + opts => + deidentifyWithFpe( + opts.string, + opts.alphabet, + opts.keyName, + opts.wrappedKey + ) ) .example(`node $0 mask "My SSN is 372819127"`) - .example(`node $0 fpe "My SSN is 372819127" `) + .example( + `node $0 fpe "My SSN is 372819127" ` + ) .wrap(120) .recommendCommands() .epilogue(`For more information, see https://cloud.google.com/dlp/docs.`); diff --git a/dlp/inspect.js b/dlp/inspect.js index b01032e467..1d0db258ac 100644 --- a/dlp/inspect.js +++ b/dlp/inspect.js @@ -19,7 +19,13 @@ const fs = require('fs'); const mime = require('mime'); const Buffer = require('safe-buffer').Buffer; -function inspectString (string, minLikelihood, maxFindings, infoTypes, includeQuote) { +function inspectString( + string, + minLikelihood, + maxFindings, + infoTypes, + includeQuote +) { // [START inspect_string] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -43,7 +49,7 @@ function inspectString (string, minLikelihood, maxFindings, infoTypes, includeQu // const includeQuote = true; // Construct items to inspect - const items = [{ type: 'text/plain', value: string }]; + const items = [{type: 'text/plain', value: string}]; // Construct request const request = { @@ -51,18 +57,19 @@ function inspectString (string, minLikelihood, maxFindings, infoTypes, includeQu infoTypes: infoTypes, minLikelihood: minLikelihood, maxFindings: maxFindings, - includeQuote: includeQuote + includeQuote: includeQuote, }, - items: items + items: items, }; // Run request - dlp.inspectContent(request) - .then((response) => { + dlp + .inspectContent(request) + .then(response => { const findings = response[0].results[0].findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { if (includeQuote) { console.log(`\tQuote: ${finding.quote}`); } @@ -73,13 +80,19 @@ function inspectString (string, minLikelihood, maxFindings, infoTypes, 
includeQu console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in inspectString: ${err.message || err}`); }); // [END inspect_string] } -function inspectFile (filepath, minLikelihood, maxFindings, infoTypes, includeQuote) { +function inspectFile( + filepath, + minLikelihood, + maxFindings, + infoTypes, + includeQuote +) { // [START inspect_file] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -103,10 +116,12 @@ function inspectFile (filepath, minLikelihood, maxFindings, infoTypes, includeQu // const includeQuote = true; // Construct file data to inspect - const fileItems = [{ - type: mime.lookup(filepath) || 'application/octet-stream', - data: Buffer.from(fs.readFileSync(filepath)).toString('base64') - }]; + const fileItems = [ + { + type: mime.lookup(filepath) || 'application/octet-stream', + data: Buffer.from(fs.readFileSync(filepath)).toString('base64'), + }, + ]; // Construct request const request = { @@ -114,18 +129,19 @@ function inspectFile (filepath, minLikelihood, maxFindings, infoTypes, includeQu infoTypes: infoTypes, minLikelihood: minLikelihood, maxFindings: maxFindings, - includeQuote: includeQuote + includeQuote: includeQuote, }, - items: fileItems + items: fileItems, }; // Run request - dlp.inspectContent(request) - .then((response) => { + dlp + .inspectContent(request) + .then(response => { const findings = response[0].results[0].findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { if (includeQuote) { console.log(`\tQuote: ${finding.quote}`); } @@ -136,13 +152,19 @@ function inspectFile (filepath, minLikelihood, maxFindings, infoTypes, includeQu console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in inspectFile: ${err.message || err}`); }); // [END inspect_file] } -function promiseInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings, infoTypes) { +function promiseInspectGCSFile( + bucketName, + fileName, + minLikelihood, + maxFindings, + infoTypes +) { // [START inspect_gcs_file_promise] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -169,8 +191,8 @@ function promiseInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings // Get reference to the file to be inspected const storageItems = { cloudStorageOptions: { - fileSet: { url: `gs://${bucketName}/${fileName}` } - } + fileSet: {url: `gs://${bucketName}/${fileName}`}, + }, }; // Construct REST request body for creating an inspect job @@ -178,31 +200,32 @@ function promiseInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings inspectConfig: { infoTypes: infoTypes, minLikelihood: minLikelihood, - maxFindings: maxFindings + maxFindings: maxFindings, }, - storageConfig: storageItems + storageConfig: storageItems, }; // Create a GCS File inspection job and wait for it to complete (using promises) - dlp.createInspectOperation(request) - .then((createJobResponse) => { + dlp + .createInspectOperation(request) + .then(createJobResponse => { const operation = createJobResponse[0]; // Start polling for job completion return operation.promise(); }) - .then((completeJobResponse) => { + .then(completeJobResponse => { // When job is complete, get its results const jobName = completeJobResponse[0].name; return dlp.listInspectFindings({ - name: jobName + name: jobName, }); }) - .then((results) => { + .then(results => { const findings = 
results[0].result.findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { console.log(`\tInfo type: ${finding.infoType.name}`); console.log(`\tLikelihood: ${finding.likelihood}`); }); @@ -210,13 +233,19 @@ function promiseInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in promiseInspectGCSFile: ${err.message || err}`); }); // [END inspect_gcs_file_promise] } -function eventInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings, infoTypes) { +function eventInspectGCSFile( + bucketName, + fileName, + minLikelihood, + maxFindings, + infoTypes +) { // [START inspect_gcs_file_event] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -243,8 +272,8 @@ function eventInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings, // Get reference to the file to be inspected const storageItems = { cloudStorageOptions: { - fileSet: { url: `gs://${bucketName}/${fileName}` } - } + fileSet: {url: `gs://${bucketName}/${fileName}`}, + }, }; // Construct REST request body for creating an inspect job @@ -252,42 +281,45 @@ function eventInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings, inspectConfig: { infoTypes: infoTypes, minLikelihood: minLikelihood, - maxFindings: maxFindings + maxFindings: maxFindings, }, - storageConfig: storageItems + storageConfig: storageItems, }; // Create a GCS File inspection job, and handle its completion (using event handlers) // Promises are used (only) to avoid nested callbacks - dlp.createInspectOperation(request) - .then((createJobResponse) => { + dlp + .createInspectOperation(request) + .then(createJobResponse => { const operation = createJobResponse[0]; return new Promise((resolve, reject) => { - operation.on('complete', (completeJobResponse) => { + operation.on('complete', completeJobResponse => { return resolve(completeJobResponse); }); // Handle changes in job metadata (e.g. 
progress updates) - operation.on('progress', (metadata) => { - console.log(`Processed ${metadata.processedBytes} of approximately ${metadata.totalEstimatedBytes} bytes.`); + operation.on('progress', metadata => { + console.log( + `Processed ${metadata.processedBytes} of approximately ${metadata.totalEstimatedBytes} bytes.` + ); }); - operation.on('error', (err) => { + operation.on('error', err => { return reject(err); }); }); }) - .then((completeJobResponse) => { + .then(completeJobResponse => { const jobName = completeJobResponse.name; return dlp.listInspectFindings({ - name: jobName + name: jobName, }); }) - .then((results) => { + .then(results => { const findings = results[0].result.findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { console.log(`\tInfo type: ${finding.infoType.name}`); console.log(`\tLikelihood: ${finding.likelihood}`); }); @@ -295,13 +327,21 @@ function eventInspectGCSFile (bucketName, fileName, minLikelihood, maxFindings, console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in eventInspectGCSFile: ${err.message || err}`); }); // [END inspect_gcs_file_event] } -function inspectDatastore (projectId, namespaceId, kind, minLikelihood, maxFindings, infoTypes, includeQuote) { +function inspectDatastore( + projectId, + namespaceId, + kind, + minLikelihood, + maxFindings, + infoTypes + // includeQuote +) { // [START inspect_datastore] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -333,12 +373,12 @@ function inspectDatastore (projectId, namespaceId, kind, minLikelihood, maxFindi datastoreOptions: { partitionId: { projectId: projectId, - namespaceId: namespaceId + namespaceId: namespaceId, }, kind: { - name: kind - } - } + name: kind, + }, + }, }; // Construct request for creating an inspect job @@ -346,31 +386,32 @@ function inspectDatastore (projectId, namespaceId, kind, minLikelihood, maxFindi inspectConfig: { infoTypes: infoTypes, minLikelihood: minLikelihood, - maxFindings: maxFindings + maxFindings: maxFindings, }, - storageConfig: storageItems + storageConfig: storageItems, }; // Run inspect-job creation request - dlp.createInspectOperation(request) - .then((createJobResponse) => { + dlp + .createInspectOperation(request) + .then(createJobResponse => { const operation = createJobResponse[0]; // Start polling for job completion return operation.promise(); }) - .then((completeJobResponse) => { + .then(completeJobResponse => { // When job is complete, get its results const jobName = completeJobResponse[0].name; return dlp.listInspectFindings({ - name: jobName + name: jobName, }); }) - .then((results) => { + .then(results => { const findings = results[0].result.findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { console.log(`\tInfo type: ${finding.infoType.name}`); console.log(`\tLikelihood: ${finding.likelihood}`); }); @@ -378,13 +419,21 @@ function inspectDatastore (projectId, namespaceId, kind, minLikelihood, maxFindi console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in inspectDatastore: ${err.message || err}`); }); // [END inspect_datastore] } -function inspectBigquery (projectId, datasetId, tableId, minLikelihood, maxFindings, infoTypes, includeQuote) { +function inspectBigquery( + projectId, + datasetId, + tableId, + minLikelihood, + maxFindings, + infoTypes + // includeQuote +) { // 
[START inspect_bigquery] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -416,9 +465,9 @@ function inspectBigquery (projectId, datasetId, tableId, minLikelihood, maxFindi tableReference: { projectId: projectId, datasetId: datasetId, - tableId: tableId - } - } + tableId: tableId, + }, + }, }; // Construct request for creating an inspect job @@ -426,31 +475,32 @@ function inspectBigquery (projectId, datasetId, tableId, minLikelihood, maxFindi inspectConfig: { infoTypes: infoTypes, minLikelihood: minLikelihood, - maxFindings: maxFindings + maxFindings: maxFindings, }, - storageConfig: storageItems + storageConfig: storageItems, }; // Run inspect-job creation request - dlp.createInspectOperation(request) - .then((createJobResponse) => { + dlp + .createInspectOperation(request) + .then(createJobResponse => { const operation = createJobResponse[0]; // Start polling for job completion return operation.promise(); }) - .then((completeJobResponse) => { + .then(completeJobResponse => { // When job is complete, get its results const jobName = completeJobResponse[0].name; return dlp.listInspectFindings({ - name: jobName + name: jobName, }); }) - .then((results) => { + .then(results => { const findings = results[0].result.findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { console.log(`\tInfo type: ${finding.infoType.name}`); console.log(`\tLikelihood: ${finding.likelihood}`); }); @@ -458,7 +508,7 @@ function inspectBigquery (projectId, datasetId, tableId, minLikelihood, maxFindi console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.log(`Error in inspectBigquery: ${err.message || err}`); }); // [END inspect_bigquery] @@ -470,94 +520,100 @@ const cli = require(`yargs`) // eslint-disable-line `string `, `Inspect a string using the Data Loss Prevention API.`, {}, - (opts) => inspectString( - opts.string, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes, - opts.includeQuote - ) + opts => + inspectString( + opts.string, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes, + opts.includeQuote + ) ) .command( `file `, `Inspects a local text, PNG, or JPEG file using the Data Loss Prevention API.`, {}, - (opts) => inspectFile( - opts.filepath, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes, - opts.includeQuote - ) + opts => + inspectFile( + opts.filepath, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes, + opts.includeQuote + ) ) .command( `gcsFilePromise `, `Inspects a text file stored on Google Cloud Storage using the Data Loss Prevention API and the promise pattern.`, {}, - (opts) => promiseInspectGCSFile( - opts.bucketName, - opts.fileName, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes - ) + opts => + promiseInspectGCSFile( + opts.bucketName, + opts.fileName, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes + ) ) .command( `gcsFileEvent `, `Inspects a text file stored on Google Cloud Storage using the Data Loss Prevention API and the event-handler pattern.`, {}, - (opts) => eventInspectGCSFile( - opts.bucketName, - opts.fileName, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes - ) + opts => + eventInspectGCSFile( + opts.bucketName, + opts.fileName, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes + ) ) .command( `bigquery `, `Inspects a BigQuery table using the Data Loss Prevention API.`, - { - projectId: { - type: 'string', - alias: 'p', - default: 
process.env.GCLOUD_PROJECT - } - }, - (opts) => inspectBigquery( - opts.projectId, - opts.datasetName, - opts.tableName, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes, - opts.includeQuote - ) + { + projectId: { + type: 'string', + alias: 'p', + default: process.env.GCLOUD_PROJECT, + }, + }, + opts => + inspectBigquery( + opts.projectId, + opts.datasetName, + opts.tableName, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes, + opts.includeQuote + ) ) .command( `datastore `, `Inspect a Datastore instance using the Data Loss Prevention API.`, - { - projectId: { - type: 'string', - alias: 'p', - default: process.env.GCLOUD_PROJECT + { + projectId: { + type: 'string', + alias: 'p', + default: process.env.GCLOUD_PROJECT, + }, + namespaceId: { + type: 'string', + alias: 'n', + default: '', + }, }, - namespaceId: { - type: 'string', - alias: 'n', - default: '' - } - }, - (opts) => inspectDatastore( - opts.projectId, - opts.namespaceId, - opts.kind, - opts.minLikelihood, - opts.maxFindings, - opts.infoTypes, - opts.includeQuote - ) + opts => + inspectDatastore( + opts.projectId, + opts.namespaceId, + opts.kind, + opts.minLikelihood, + opts.maxFindings, + opts.infoTypes, + opts.includeQuote + ) ) .option('m', { alias: 'minLikelihood', @@ -569,32 +625,35 @@ const cli = require(`yargs`) // eslint-disable-line 'UNLIKELY', 'POSSIBLE', 'LIKELY', - 'VERY_LIKELY' + 'VERY_LIKELY', ], - global: true + global: true, }) .option('f', { alias: 'maxFindings', default: 0, type: 'number', - global: true + global: true, }) .option('q', { alias: 'includeQuote', default: true, type: 'boolean', - global: true + global: true, }) .option('t', { alias: 'infoTypes', default: ['PHONE_NUMBER', 'EMAIL_ADDRESS', 'CREDIT_CARD_NUMBER'], type: 'array', global: true, - coerce: (infoTypes) => infoTypes.map((type) => { - return { name: type }; - }) + coerce: infoTypes => + infoTypes.map(type => { + return {name: type}; + }), }) - .example(`node $0 string "My phone number is (123) 456-7890 and my email address is me@somedomain.com"`) + .example( + `node $0 string "My phone number is (123) 456-7890 and my email address is me@somedomain.com"` + ) .example(`node $0 file resources/test.txt`) .example(`node $0 gcsFilePromise my-bucket my-file.txt`) .example(`node $0 gcsFileEvent my-bucket my-file.txt`) @@ -602,7 +661,9 @@ const cli = require(`yargs`) // eslint-disable-line .example(`node $0 datastore my-datastore-kind`) .wrap(120) .recommendCommands() - .epilogue(`For more information, see https://cloud.google.com/dlp/docs. Optional flags are explained at https://cloud.google.com/dlp/docs/reference/rest/v2beta1/content/inspect#InspectConfig`); + .epilogue( + `For more information, see https://cloud.google.com/dlp/docs. Optional flags are explained at https://cloud.google.com/dlp/docs/reference/rest/v2beta1/content/inspect#InspectConfig` + ); if (module === require.main) { cli.help().strict().argv; // eslint-disable-line diff --git a/dlp/metadata.js b/dlp/metadata.js index 4725d0794d..760448141b 100644 --- a/dlp/metadata.js +++ b/dlp/metadata.js @@ -15,7 +15,7 @@ 'use strict'; -function listInfoTypes (category, languageCode) { +function listInfoTypes(category, languageCode) { // [START list_info_types] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -29,24 +29,25 @@ function listInfoTypes (category, languageCode) { // The BCP-47 language code to use, e.g. 
'en-US' // const languageCode = 'en-US'; - dlp.listInfoTypes({ - category: category, - languageCode: languageCode - }) - .then((body) => { - const infoTypes = body[0].infoTypes; - console.log(`Info types for category ${category}:`); - infoTypes.forEach((infoType) => { - console.log(`\t${infoType.name} (${infoType.displayName})`); + dlp + .listInfoTypes({ + category: category, + languageCode: languageCode, + }) + .then(body => { + const infoTypes = body[0].infoTypes; + console.log(`Info types for category ${category}:`); + infoTypes.forEach(infoType => { + console.log(`\t${infoType.name} (${infoType.displayName})`); + }); + }) + .catch(err => { + console.log(`Error in listInfoTypes: ${err.message || err}`); }); - }) - .catch((err) => { - console.log(`Error in listInfoTypes: ${err.message || err}`); - }); // [END list_info_types] } -function listRootCategories (languageCode) { +function listRootCategories(languageCode) { // [START list_categories] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -57,19 +58,20 @@ function listRootCategories (languageCode) { // The BCP-47 language code to use, e.g. 'en-US' // const languageCode = 'en-US'; - dlp.listRootCategories({ - languageCode: languageCode - }) - .then((body) => { - const categories = body[0].categories; - console.log(`Categories:`); - categories.forEach((category) => { - console.log(`\t${category.name}: ${category.displayName}`); + dlp + .listRootCategories({ + languageCode: languageCode, + }) + .then(body => { + const categories = body[0].categories; + console.log(`Categories:`); + categories.forEach(category => { + console.log(`\t${category.name}: ${category.displayName}`); + }); + }) + .catch(err => { + console.log(`Error in listRootCategories: ${err.message || err}`); }); - }) - .catch((err) => { - console.log(`Error in listRootCategories: ${err.message || err}`); - }); // [END list_categories] } @@ -79,19 +81,19 @@ const cli = require(`yargs`) `infoTypes `, `List types of sensitive information within a category.`, {}, - (opts) => listInfoTypes(opts.category, opts.languageCode) + opts => listInfoTypes(opts.category, opts.languageCode) ) .command( `categories`, `List root categories of sensitive information.`, {}, - (opts) => listRootCategories(opts.languageCode) + opts => listRootCategories(opts.languageCode) ) .option('l', { alias: 'languageCode', default: 'en-US', type: 'string', - global: true + global: true, }) .example(`node $0 infoTypes GOVERNMENT`) .example(`node $0 categories`) diff --git a/dlp/package.json b/dlp/package.json index 24ac62fa84..556e91b13a 100644 --- a/dlp/package.json +++ b/dlp/package.json @@ -5,64 +5,13 @@ "private": true, "license": "Apache-2.0", "author": "Google Inc.", - "repository": { - "type": "git", - "url": "https://github.com/GoogleCloudPlatform/nodejs-docs-samples.git" - }, + "repository": "googleapis/nodejs-dlp", "engines": { "node": ">=4.3.2" }, "scripts": { - "lint": "samples lint", - "pretest": "npm run lint", "test": "samples test run --cmd ava -- -T 1m --verbose system-test/*.test.js" }, - "cloud-repo-tools": { - "requiresKeyFile": true, - "requiresProjectId": true, - "requiredEnvVars": [ - "DLP_DEID_WRAPPED_KEY", - "DLP_DEID_KEY_NAME" - ], - "product": "dlp", - "samples": [ - { - "id": "inspect", - "name": "Inspect", - "file": "inspect.js", - "docs_link": "https://cloud.google.com/dlp/docs", - "usage": "node inspect.js --help" - }, - { - "id": "redact", - "name": "Redact", - "file": "redact.js", - "docs_link": 
"https://cloud.google.com/dlp/docs", - "usage": "node redact.js --help" - }, - { - "id": "metadata", - "name": "Metadata", - "file": "metadata.js", - "docs_link": "https://cloud.google.com/dlp/docs", - "usage": "node metadata.js --help" - }, - { - "id": "deid", - "name": "DeID", - "file": "deid.js", - "docs_link": "https://cloud.google.com/dlp/docs", - "usage": "node deid.js --help" - }, - { - "id": "risk", - "name": "Risk Analysis", - "file": "risk.js", - "docs_link": "https://cloud.google.com/dlp/docs", - "usage": "node risk.js --help" - } - ] - }, "dependencies": { "@google-cloud/bigquery": "^0.9.6", "@google-cloud/dlp": "^0.1.0", diff --git a/dlp/quickstart.js b/dlp/quickstart.js index 392e8008fb..09f54af2db 100644 --- a/dlp/quickstart.js +++ b/dlp/quickstart.js @@ -32,16 +32,13 @@ const minLikelihood = 'LIKELIHOOD_UNSPECIFIED'; const maxFindings = 0; // The infoTypes of information to match -const infoTypes = [ - { name: 'US_MALE_NAME' }, - { name: 'US_FEMALE_NAME' } -]; +const infoTypes = [{name: 'US_MALE_NAME'}, {name: 'US_FEMALE_NAME'}]; // Whether to include the matching string const includeQuote = true; // Construct items to inspect -const items = [{ type: 'text/plain', value: string }]; +const items = [{type: 'text/plain', value: string}]; // Construct request const request = { @@ -49,18 +46,19 @@ const request = { infoTypes: infoTypes, minLikelihood: minLikelihood, maxFindings: maxFindings, - includeQuote: includeQuote + includeQuote: includeQuote, }, - items: items + items: items, }; // Run request -dlp.inspectContent(request) - .then((response) => { +dlp + .inspectContent(request) + .then(response => { const findings = response[0].results[0].findings; if (findings.length > 0) { console.log(`Findings:`); - findings.forEach((finding) => { + findings.forEach(finding => { if (includeQuote) { console.log(`\tQuote: ${finding.quote}`); } @@ -71,7 +69,7 @@ dlp.inspectContent(request) console.log(`No findings.`); } }) - .catch((err) => { + .catch(err => { console.error(`Error in inspectString: ${err.message || err}`); }); // [END quickstart] diff --git a/dlp/redact.js b/dlp/redact.js index 2bc1c23902..d7b8cf6435 100644 --- a/dlp/redact.js +++ b/dlp/redact.js @@ -15,7 +15,7 @@ 'use strict'; -function redactString (string, replaceString, minLikelihood, infoTypes) { +function redactString(string, replaceString, minLikelihood, infoTypes) { // [START redact_string] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -35,36 +35,37 @@ function redactString (string, replaceString, minLikelihood, infoTypes) { // The infoTypes of information to redact // const infoTypes = [{ name: 'US_MALE_NAME' }, { name: 'US_FEMALE_NAME' }]; - const items = [{ type: 'text/plain', value: string }]; + const items = [{type: 'text/plain', value: string}]; - const replaceConfigs = infoTypes.map((infoType) => { + const replaceConfigs = infoTypes.map(infoType => { return { infoType: infoType, - replaceWith: replaceString + replaceWith: replaceString, }; }); const request = { inspectConfig: { infoTypes: infoTypes, - minLikelihood: minLikelihood + minLikelihood: minLikelihood, }, items: items, - replaceConfigs: replaceConfigs + replaceConfigs: replaceConfigs, }; - dlp.redactContent(request) - .then((body) => { + dlp + .redactContent(request) + .then(body => { const results = body[0].items[0].value; console.log(results); }) - .catch((err) => { + .catch(err => { console.log(`Error in redactString: ${err.message || err}`); }); // [END redact_string] } -function 
redactImage (filepath, minLikelihood, infoTypes, outputPath) { +function redactImage(filepath, minLikelihood, infoTypes, outputPath) { // [START redact_image] // Imports required Node.js libraries const mime = require('mime'); @@ -88,30 +89,33 @@ function redactImage (filepath, minLikelihood, infoTypes, outputPath) { // The local path to save the resulting image to. // const outputPath = 'result.png'; - const fileItems = [{ - type: mime.lookup(filepath) || 'application/octet-stream', - data: Buffer.from(fs.readFileSync(filepath)).toString('base64') - }]; + const fileItems = [ + { + type: mime.lookup(filepath) || 'application/octet-stream', + data: Buffer.from(fs.readFileSync(filepath)).toString('base64'), + }, + ]; - const imageRedactionConfigs = infoTypes.map((infoType) => { - return { infoType: infoType }; + const imageRedactionConfigs = infoTypes.map(infoType => { + return {infoType: infoType}; }); const request = { inspectConfig: { - minLikelihood: minLikelihood + minLikelihood: minLikelihood, }, imageRedactionConfigs: imageRedactionConfigs, - items: fileItems + items: fileItems, }; - dlp.redactContent(request) - .then((response) => { + dlp + .redactContent(request) + .then(response => { const image = response[0].items[0].data; fs.writeFileSync(outputPath, image); console.log(`Saved image redaction results to path: ${outputPath}`); }) - .catch((err) => { + .catch(err => { console.log(`Error in redactImage: ${err.message || err}`); }); // [END redact_image] @@ -123,13 +127,25 @@ const cli = require(`yargs`) `string `, `Redact sensitive data from a string using the Data Loss Prevention API.`, {}, - (opts) => redactString(opts.string, opts.replaceString, opts.minLikelihood, opts.infoTypes) + opts => + redactString( + opts.string, + opts.replaceString, + opts.minLikelihood, + opts.infoTypes + ) ) .command( `image `, `Redact sensitive data from an image using the Data Loss Prevention API.`, {}, - (opts) => redactImage(opts.filepath, opts.minLikelihood, opts.infoTypes, opts.outputPath) + opts => + redactImage( + opts.filepath, + opts.minLikelihood, + opts.infoTypes, + opts.outputPath + ) ) .option('m', { alias: 'minLikelihood', @@ -141,24 +157,29 @@ const cli = require(`yargs`) 'UNLIKELY', 'POSSIBLE', 'LIKELY', - 'VERY_LIKELY' + 'VERY_LIKELY', ], - global: true + global: true, }) .option('t', { alias: 'infoTypes', required: true, type: 'array', global: true, - coerce: (infoTypes) => infoTypes.map((type) => { - return { name: type }; - }) + coerce: infoTypes => + infoTypes.map(type => { + return {name: type}; + }), }) .example(`node $0 string "My name is Gary" "REDACTED" -t US_MALE_NAME`) - .example(`node $0 image resources/test.png redaction_result.png -t US_MALE_NAME`) + .example( + `node $0 image resources/test.png redaction_result.png -t US_MALE_NAME` + ) .wrap(120) .recommendCommands() - .epilogue(`For more information, see https://cloud.google.com/dlp/docs. Optional flags are explained at https://cloud.google.com/dlp/docs/reference/rest/v2beta1/content/inspect#InspectConfig`); + .epilogue( + `For more information, see https://cloud.google.com/dlp/docs. 
Optional flags are explained at https://cloud.google.com/dlp/docs/reference/rest/v2beta1/content/inspect#InspectConfig` + ); if (module === require.main) { cli.help().strict().argv; // eslint-disable-line diff --git a/dlp/risk.js b/dlp/risk.js index 6faa1c4a7e..983030224d 100644 --- a/dlp/risk.js +++ b/dlp/risk.js @@ -15,7 +15,7 @@ 'use strict'; -function numericalRiskAnalysis (projectId, datasetId, tableId, columnName) { +function numericalRiskAnalysis(projectId, datasetId, tableId, columnName) { // [START numerical_risk] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -39,7 +39,7 @@ function numericalRiskAnalysis (projectId, datasetId, tableId, columnName) { const sourceTable = { projectId: projectId, datasetId: datasetId, - tableId: tableId + tableId: tableId, }; // Construct request for creating a risk analysis job @@ -47,26 +47,31 @@ function numericalRiskAnalysis (projectId, datasetId, tableId, columnName) { privacyMetric: { numericalStatsConfig: { field: { - columnName: columnName - } - } + columnName: columnName, + }, + }, }, - sourceTable: sourceTable + sourceTable: sourceTable, }; // Create helper function for unpacking values - const getValue = (obj) => obj[Object.keys(obj)[0]]; + const getValue = obj => obj[Object.keys(obj)[0]]; // Run risk analysis job - dlp.analyzeDataSourceRisk(request) - .then((response) => { + dlp + .analyzeDataSourceRisk(request) + .then(response => { const operation = response[0]; return operation.promise(); }) - .then((completedJobResponse) => { + .then(completedJobResponse => { const results = completedJobResponse[0].numericalStatsResult; - console.log(`Value Range: [${getValue(results.minValue)}, ${getValue(results.maxValue)}]`); + console.log( + `Value Range: [${getValue(results.minValue)}, ${getValue( + results.maxValue + )}]` + ); // Print unique quantile values let tempValue = null; @@ -74,20 +79,22 @@ function numericalRiskAnalysis (projectId, datasetId, tableId, columnName) { const value = getValue(result); // Only print new values - if ((tempValue !== value) && - !(tempValue && tempValue.equals && tempValue.equals(value))) { + if ( + tempValue !== value && + !(tempValue && tempValue.equals && tempValue.equals(value)) + ) { console.log(`Value at ${percent}% quantile: ${value}`); tempValue = value; } }); }) - .catch((err) => { + .catch(err => { console.log(`Error in numericalRiskAnalysis: ${err.message || err}`); }); - // [END numerical_risk] + // [END numerical_risk] } -function categoricalRiskAnalysis (projectId, datasetId, tableId, columnName) { +function categoricalRiskAnalysis(projectId, datasetId, tableId, columnName) { // [START categorical_risk] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -110,7 +117,7 @@ function categoricalRiskAnalysis (projectId, datasetId, tableId, columnName) { const sourceTable = { projectId: projectId, datasetId: datasetId, - tableId: tableId + tableId: tableId, }; // Construct request for creating a risk analysis job @@ -118,38 +125,47 @@ function categoricalRiskAnalysis (projectId, datasetId, tableId, columnName) { privacyMetric: { categoricalStatsConfig: { field: { - columnName: columnName - } - } + columnName: columnName, + }, + }, }, - sourceTable: sourceTable + sourceTable: sourceTable, }; // Create helper function for unpacking values - const getValue = (obj) => obj[Object.keys(obj)[0]]; + const getValue = obj => obj[Object.keys(obj)[0]]; // Run risk analysis job - 
dlp.analyzeDataSourceRisk(request) - .then((response) => { + dlp + .analyzeDataSourceRisk(request) + .then(response => { const operation = response[0]; return operation.promise(); }) - .then((completedJobResponse) => { - const results = completedJobResponse[0].categoricalStatsResult.valueFrequencyHistogramBuckets[0]; - console.log(`Most common value occurs ${results.valueFrequencyUpperBound} time(s)`); - console.log(`Least common value occurs ${results.valueFrequencyLowerBound} time(s)`); + .then(completedJobResponse => { + const results = + completedJobResponse[0].categoricalStatsResult + .valueFrequencyHistogramBuckets[0]; + console.log( + `Most common value occurs ${results.valueFrequencyUpperBound} time(s)` + ); + console.log( + `Least common value occurs ${results.valueFrequencyLowerBound} time(s)` + ); console.log(`${results.bucketSize} unique values total.`); - results.bucketValues.forEach((bucket) => { - console.log(`Value ${getValue(bucket.value)} occurs ${bucket.count} time(s).`); + results.bucketValues.forEach(bucket => { + console.log( + `Value ${getValue(bucket.value)} occurs ${bucket.count} time(s).` + ); }); }) - .catch((err) => { + .catch(err => { console.log(`Error in categoricalRiskAnalysis: ${err.message || err}`); }); - // [END categorical_risk] + // [END categorical_risk] } -function kAnonymityAnalysis (projectId, datasetId, tableId, quasiIds) { +function kAnonymityAnalysis(projectId, datasetId, tableId, quasiIds) { // [START k_anonymity] // Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -172,45 +188,56 @@ function kAnonymityAnalysis (projectId, datasetId, tableId, quasiIds) { const sourceTable = { projectId: projectId, datasetId: datasetId, - tableId: tableId + tableId: tableId, }; // Construct request for creating a risk analysis job const request = { privacyMetric: { kAnonymityConfig: { - quasiIds: quasiIds - } + quasiIds: quasiIds, + }, }, - sourceTable: sourceTable + sourceTable: sourceTable, }; // Create helper function for unpacking values - const getValue = (obj) => obj[Object.keys(obj)[0]]; + const getValue = obj => obj[Object.keys(obj)[0]]; // Run risk analysis job - dlp.analyzeDataSourceRisk(request) - .then((response) => { + dlp + .analyzeDataSourceRisk(request) + .then(response => { const operation = response[0]; return operation.promise(); }) - .then((completedJobResponse) => { - const results = completedJobResponse[0].kAnonymityResult.equivalenceClassHistogramBuckets[0]; - console.log(`Bucket size range: [${results.equivalenceClassSizeLowerBound}, ${results.equivalenceClassSizeUpperBound}]`); - - results.bucketValues.forEach((bucket) => { + .then(completedJobResponse => { + const results = + completedJobResponse[0].kAnonymityResult + .equivalenceClassHistogramBuckets[0]; + console.log( + `Bucket size range: [${results.equivalenceClassSizeLowerBound}, ${results.equivalenceClassSizeUpperBound}]` + ); + + results.bucketValues.forEach(bucket => { const quasiIdValues = bucket.quasiIdsValues.map(getValue).join(', '); console.log(` Quasi-ID values: {${quasiIdValues}}`); console.log(` Class size: ${bucket.equivalenceClassSize}`); }); }) - .catch((err) => { + .catch(err => { console.log(`Error in kAnonymityAnalysis: ${err.message || err}`); }); - // [END k_anonymity] + // [END k_anonymity] } -function lDiversityAnalysis (projectId, datasetId, tableId, sensitiveAttribute, quasiIds) { +function lDiversityAnalysis( + projectId, + datasetId, + tableId, + sensitiveAttribute, + quasiIds +) { // [START l_diversity] // 
Imports the Google Cloud Data Loss Prevention library const DLP = require('@google-cloud/dlp'); @@ -236,7 +263,7 @@ function lDiversityAnalysis (projectId, datasetId, tableId, sensitiveAttribute, const sourceTable = { projectId: projectId, datasetId: datasetId, - tableId: tableId + tableId: tableId, }; // Construct request for creating a risk analysis job @@ -245,39 +272,48 @@ function lDiversityAnalysis (projectId, datasetId, tableId, sensitiveAttribute, lDiversityConfig: { quasiIds: quasiIds, sensitiveAttribute: { - columnName: sensitiveAttribute - } - } + columnName: sensitiveAttribute, + }, + }, }, - sourceTable: sourceTable + sourceTable: sourceTable, }; // Create helper function for unpacking values - const getValue = (obj) => obj[Object.keys(obj)[0]]; + const getValue = obj => obj[Object.keys(obj)[0]]; // Run risk analysis job - dlp.analyzeDataSourceRisk(request) - .then((response) => { + dlp + .analyzeDataSourceRisk(request) + .then(response => { const operation = response[0]; return operation.promise(); }) - .then((completedJobResponse) => { - const results = completedJobResponse[0].lDiversityResult.sensitiveValueFrequencyHistogramBuckets[0]; - - console.log(`Bucket size range: [${results.sensitiveValueFrequencyLowerBound}, ${results.sensitiveValueFrequencyUpperBound}]`); - results.bucketValues.forEach((bucket) => { + .then(completedJobResponse => { + const results = + completedJobResponse[0].lDiversityResult + .sensitiveValueFrequencyHistogramBuckets[0]; + + console.log( + `Bucket size range: [${results.sensitiveValueFrequencyLowerBound}, ${results.sensitiveValueFrequencyUpperBound}]` + ); + results.bucketValues.forEach(bucket => { const quasiIdValues = bucket.quasiIdsValues.map(getValue).join(', '); console.log(` Quasi-ID values: {${quasiIdValues}}`); console.log(` Class size: ${bucket.equivalenceClassSize}`); - bucket.topSensitiveValues.forEach((valueObj) => { - console.log(` Sensitive value ${getValue(valueObj.value)} occurs ${valueObj.count} time(s).`); + bucket.topSensitiveValues.forEach(valueObj => { + console.log( + ` Sensitive value ${getValue( + valueObj.value + )} occurs ${valueObj.count} time(s).` + ); }); }); }) - .catch((err) => { + .catch(err => { console.log(`Error in lDiversityAnalysis: ${err.message || err}`); }); - // [END l_diversity] + // [END l_diversity] } const cli = require(`yargs`) // eslint-disable-line @@ -286,61 +322,73 @@ const cli = require(`yargs`) // eslint-disable-line `numerical `, `Computes risk metrics of a column of numbers in a Google BigQuery table.`, {}, - (opts) => numericalRiskAnalysis( - opts.projectId, - opts.datasetId, - opts.tableId, - opts.columnName - ) + opts => + numericalRiskAnalysis( + opts.projectId, + opts.datasetId, + opts.tableId, + opts.columnName + ) ) .command( `categorical `, `Computes risk metrics of a column of data in a Google BigQuery table.`, {}, - (opts) => categoricalRiskAnalysis( - opts.projectId, - opts.datasetId, - opts.tableId, - opts.columnName - ) + opts => + categoricalRiskAnalysis( + opts.projectId, + opts.datasetId, + opts.tableId, + opts.columnName + ) ) .command( `kAnonymity [quasiIdColumnNames..]`, `Computes the k-anonymity of a column set in a Google BigQuery table.`, {}, - (opts) => kAnonymityAnalysis( - opts.projectId, - opts.datasetId, - opts.tableId, - opts.quasiIdColumnNames.map((f) => { - return { columnName: f }; - }) - ) + opts => + kAnonymityAnalysis( + opts.projectId, + opts.datasetId, + opts.tableId, + opts.quasiIdColumnNames.map(f => { + return {columnName: f}; + }) + ) ) .command( 
`lDiversity [quasiIdColumnNames..]`, `Computes the l-diversity of a column set in a Google BigQuery table.`, {}, - (opts) => lDiversityAnalysis( - opts.projectId, - opts.datasetId, - opts.tableId, - opts.sensitiveAttribute, - opts.quasiIdColumnNames.map((f) => { - return { columnName: f }; - }) - ) + opts => + lDiversityAnalysis( + opts.projectId, + opts.datasetId, + opts.tableId, + opts.sensitiveAttribute, + opts.quasiIdColumnNames.map(f => { + return {columnName: f}; + }) + ) ) .option('p', { type: 'string', alias: 'projectId', default: process.env.GCLOUD_PROJECT, - global: true + global: true, }) - .example(`node $0 numerical nhtsa_traffic_fatalities accident_2015 state_number -p bigquery-public-data`) - .example(`node $0 categorical nhtsa_traffic_fatalities accident_2015 state_name -p bigquery-public-data`) - .example(`node $0 kAnonymity nhtsa_traffic_fatalities accident_2015 state_number county -p bigquery-public-data`) - .example(`node $0 lDiversity nhtsa_traffic_fatalities accident_2015 city state_number county -p bigquery-public-data`) + .example( + `node $0 numerical nhtsa_traffic_fatalities accident_2015 state_number -p bigquery-public-data` + ) + .example( + `node $0 categorical nhtsa_traffic_fatalities accident_2015 state_name -p bigquery-public-data` + ) + .example( + `node $0 kAnonymity nhtsa_traffic_fatalities accident_2015 state_number county -p bigquery-public-data` + ) + .example( + `node $0 lDiversity nhtsa_traffic_fatalities accident_2015 city state_number county -p bigquery-public-data` + ) .wrap(120) .recommendCommands() .epilogue(`For more information, see https://cloud.google.com/dlp/docs.`); diff --git a/dlp/system-test/.eslintrc.yml b/dlp/system-test/.eslintrc.yml new file mode 100644 index 0000000000..c0289282a6 --- /dev/null +++ b/dlp/system-test/.eslintrc.yml @@ -0,0 +1,5 @@ +--- +rules: + node/no-unpublished-require: off + node/no-unsupported-features: off + no-empty: off diff --git a/dlp/system-test/deid.test.js b/dlp/system-test/deid.test.js index b7348a9314..14b64e7871 100644 --- a/dlp/system-test/deid.test.js +++ b/dlp/system-test/deid.test.js @@ -31,34 +31,49 @@ const keyName = process.env.DLP_DEID_KEY_NAME; test.before(tools.checkCredentials); // deidentify_masking -test(`should mask sensitive data in a string`, async (t) => { - const output = await tools.runAsync(`${cmd} mask "${harmfulString}" -c x -n 5`, cwd); +test(`should mask sensitive data in a string`, async t => { + const output = await tools.runAsync( + `${cmd} mask "${harmfulString}" -c x -n 5`, + cwd + ); t.is(output, 'My SSN is xxxxx9127'); }); -test(`should ignore insensitive data when masking a string`, async (t) => { +test(`should ignore insensitive data when masking a string`, async t => { const output = await tools.runAsync(`${cmd} mask "${harmlessString}"`, cwd); t.is(output, harmlessString); }); -test(`should handle masking errors`, async (t) => { - const output = await tools.runAsync(`${cmd} mask "${harmfulString}" -n -1`, cwd); +test(`should handle masking errors`, async t => { + const output = await tools.runAsync( + `${cmd} mask "${harmfulString}" -n -1`, + cwd + ); t.regex(output, /Error in deidentifyWithMask/); }); // deidentify_fpe -test(`should FPE encrypt sensitive data in a string`, async (t) => { - const output = await tools.runAsync(`${cmd} fpe "${harmfulString}" ${wrappedKey} ${keyName} -a NUMERIC`, cwd); +test(`should FPE encrypt sensitive data in a string`, async t => { + const output = await tools.runAsync( + `${cmd} fpe "${harmfulString}" ${wrappedKey} ${keyName} -a 
NUMERIC`, + cwd + ); t.regex(output, /My SSN is \d{9}/); t.not(output, harmfulString); }); -test(`should ignore insensitive data when FPE encrypting a string`, async (t) => { - const output = await tools.runAsync(`${cmd} fpe "${harmlessString}" ${wrappedKey} ${keyName}`, cwd); +test(`should ignore insensitive data when FPE encrypting a string`, async t => { + const output = await tools.runAsync( + `${cmd} fpe "${harmlessString}" ${wrappedKey} ${keyName}`, + cwd + ); t.is(output, harmlessString); }); -test(`should handle FPE encryption errors`, async (t) => { - const output = await tools.runAsync(`${cmd} fpe "${harmfulString}" ${wrappedKey} BAD_KEY_NAME`, cwd); +test(`should handle FPE encryption errors`, async t => { + const output = await tools.runAsync( + `${cmd} fpe "${harmfulString}" ${wrappedKey} BAD_KEY_NAME`, + cwd + ); t.regex(output, /Error in deidentifyWithFpe/); }); diff --git a/dlp/system-test/inspect.test.js b/dlp/system-test/inspect.test.js index 922f83e96e..464f2e0e9e 100644 --- a/dlp/system-test/inspect.test.js +++ b/dlp/system-test/inspect.test.js @@ -25,128 +25,203 @@ const cwd = path.join(__dirname, `..`); test.before(tools.checkCredentials); // inspect_string -test(`should inspect a string`, async (t) => { - const output = await tools.runAsync(`${cmd} string "I'm Gary and my email is gary@example.com"`, cwd); +test(`should inspect a string`, async t => { + const output = await tools.runAsync( + `${cmd} string "I'm Gary and my email is gary@example.com"`, + cwd + ); t.regex(output, /Info type: EMAIL_ADDRESS/); }); -test(`should handle a string with no sensitive data`, async (t) => { +test(`should handle a string with no sensitive data`, async t => { const output = await tools.runAsync(`${cmd} string "foo"`, cwd); t.is(output, 'No findings.'); }); -test(`should report string inspection handling errors`, async (t) => { - const output = await tools.runAsync(`${cmd} string "I'm Gary and my email is gary@example.com" -t BAD_TYPE`, cwd); +test(`should report string inspection handling errors`, async t => { + const output = await tools.runAsync( + `${cmd} string "I'm Gary and my email is gary@example.com" -t BAD_TYPE`, + cwd + ); t.regex(output, /Error in inspectString/); }); // inspect_file -test(`should inspect a local text file`, async (t) => { +test(`should inspect a local text file`, async t => { const output = await tools.runAsync(`${cmd} file resources/test.txt`, cwd); t.regex(output, /Info type: PHONE_NUMBER/); t.regex(output, /Info type: EMAIL_ADDRESS/); }); -test(`should inspect a local image file`, async (t) => { +test(`should inspect a local image file`, async t => { const output = await tools.runAsync(`${cmd} file resources/test.png`, cwd); t.regex(output, /Info type: PHONE_NUMBER/); }); -test(`should handle a local file with no sensitive data`, async (t) => { - const output = await tools.runAsync(`${cmd} file resources/harmless.txt`, cwd); +test(`should handle a local file with no sensitive data`, async t => { + const output = await tools.runAsync( + `${cmd} file resources/harmless.txt`, + cwd + ); t.is(output, 'No findings.'); }); -test(`should report local file handling errors`, async (t) => { - const output = await tools.runAsync(`${cmd} file resources/harmless.txt -t BAD_TYPE`, cwd); +test(`should report local file handling errors`, async t => { + const output = await tools.runAsync( + `${cmd} file resources/harmless.txt -t BAD_TYPE`, + cwd + ); t.regex(output, /Error in inspectFile/); }); // inspect_gcs_file_event -test.serial(`should inspect a GCS text file 
with event handlers`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFileEvent nodejs-docs-samples-dlp test.txt`, cwd); +test.serial(`should inspect a GCS text file with event handlers`, async t => { + const output = await tools.runAsync( + `${cmd} gcsFileEvent nodejs-docs-samples-dlp test.txt`, + cwd + ); t.regex(output, /Processed \d+ of approximately \d+ bytes./); t.regex(output, /Info type: PHONE_NUMBER/); t.regex(output, /Info type: EMAIL_ADDRESS/); }); -test.serial(`should inspect multiple GCS text files with event handlers`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFileEvent nodejs-docs-samples-dlp *.txt`, cwd); - t.regex(output, /Processed \d+ of approximately \d+ bytes./); - t.regex(output, /Info type: PHONE_NUMBER/); - t.regex(output, /Info type: EMAIL_ADDRESS/); -}); - -test.serial(`should handle a GCS file with no sensitive data with event handlers`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFileEvent nodejs-docs-samples-dlp harmless.txt`, cwd); - t.regex(output, /Processed \d+ of approximately \d+ bytes./); - t.regex(output, /No findings./); -}); - -test.serial(`should report GCS file handling errors with event handlers`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFileEvent nodejs-docs-samples-dlp harmless.txt -t BAD_TYPE`, cwd); - t.regex(output, /Error in eventInspectGCSFile/); -}); +test.serial( + `should inspect multiple GCS text files with event handlers`, + async t => { + const output = await tools.runAsync( + `${cmd} gcsFileEvent nodejs-docs-samples-dlp *.txt`, + cwd + ); + t.regex(output, /Processed \d+ of approximately \d+ bytes./); + t.regex(output, /Info type: PHONE_NUMBER/); + t.regex(output, /Info type: EMAIL_ADDRESS/); + } +); + +test.serial( + `should handle a GCS file with no sensitive data with event handlers`, + async t => { + const output = await tools.runAsync( + `${cmd} gcsFileEvent nodejs-docs-samples-dlp harmless.txt`, + cwd + ); + t.regex(output, /Processed \d+ of approximately \d+ bytes./); + t.regex(output, /No findings./); + } +); + +test.serial( + `should report GCS file handling errors with event handlers`, + async t => { + const output = await tools.runAsync( + `${cmd} gcsFileEvent nodejs-docs-samples-dlp harmless.txt -t BAD_TYPE`, + cwd + ); + t.regex(output, /Error in eventInspectGCSFile/); + } +); // inspect_gcs_file_promise -test.serial(`should inspect a GCS text file with promises`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFilePromise nodejs-docs-samples-dlp test.txt`, cwd); +test.serial(`should inspect a GCS text file with promises`, async t => { + const output = await tools.runAsync( + `${cmd} gcsFilePromise nodejs-docs-samples-dlp test.txt`, + cwd + ); t.regex(output, /Info type: PHONE_NUMBER/); t.regex(output, /Info type: EMAIL_ADDRESS/); }); -test.serial(`should inspect multiple GCS text files with promises`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFilePromise nodejs-docs-samples-dlp *.txt`, cwd); +test.serial(`should inspect multiple GCS text files with promises`, async t => { + const output = await tools.runAsync( + `${cmd} gcsFilePromise nodejs-docs-samples-dlp *.txt`, + cwd + ); t.regex(output, /Info type: PHONE_NUMBER/); t.regex(output, /Info type: EMAIL_ADDRESS/); }); -test.serial(`should handle a GCS file with no sensitive data with promises`, async (t) => { - const output = await tools.runAsync(`${cmd} gcsFilePromise nodejs-docs-samples-dlp harmless.txt`, cwd); - t.is(output, 'No findings.'); -}); - 
-test.serial(`should report GCS file handling errors with promises`, async (t) => {
-  const output = await tools.runAsync(`${cmd} gcsFilePromise nodejs-docs-samples-dlp harmless.txt -t BAD_TYPE`, cwd);
+test.serial(
+  `should handle a GCS file with no sensitive data with promises`,
+  async t => {
+    const output = await tools.runAsync(
+      `${cmd} gcsFilePromise nodejs-docs-samples-dlp harmless.txt`,
+      cwd
+    );
+    t.is(output, 'No findings.');
+  }
+);
+
+test.serial(`should report GCS file handling errors with promises`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} gcsFilePromise nodejs-docs-samples-dlp harmless.txt -t BAD_TYPE`,
+    cwd
+  );
   t.regex(output, /Error in promiseInspectGCSFile/);
 });
 
 // inspect_datastore
-test.serial(`should inspect Datastore`, async (t) => {
-  const output = await tools.runAsync(`${cmd} datastore Person --namespaceId DLP`, cwd);
+test.serial(`should inspect Datastore`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} datastore Person --namespaceId DLP`,
+    cwd
+  );
   t.regex(output, /Info type: EMAIL_ADDRESS/);
 });
 
-test.serial(`should handle Datastore with no sensitive data`, async (t) => {
-  const output = await tools.runAsync(`${cmd} datastore Harmless --namespaceId DLP`, cwd);
+test.serial(`should handle Datastore with no sensitive data`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} datastore Harmless --namespaceId DLP`,
+    cwd
+  );
   t.is(output, 'No findings.');
 });
 
-test.serial(`should report Datastore errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} datastore Harmless --namespaceId DLP -t BAD_TYPE`, cwd);
+test.serial(`should report Datastore errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} datastore Harmless --namespaceId DLP -t BAD_TYPE`,
+    cwd
+  );
   t.regex(output, /Error in inspectDatastore/);
 });
 
 // inspect_bigquery
-test.serial(`should inspect a Bigquery table`, async (t) => {
-  const output = await tools.runAsync(`${cmd} bigquery integration_tests_dlp harmful`, cwd);
-  t.regex(output, /Info type: CREDIT_CARD_NUMBER/);
-});
-
-test.serial(`should handle a Bigquery table with no sensitive data`, async (t) => {
-  const output = await tools.runAsync(`${cmd} bigquery integration_tests_dlp harmless `, cwd);
-  t.is(output, 'No findings.');
+test.serial(`should inspect a Bigquery table`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} bigquery integration_tests_dlp harmful`,
+    cwd
+  );
+  t.regex(output, /Info type: PHONE_NUMBER/);
 });
 
-test.serial(`should report Bigquery table handling errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} bigquery integration_tests_dlp harmless -t BAD_TYPE`, cwd);
+test.serial(
+  `should handle a Bigquery table with no sensitive data`,
+  async t => {
+    const output = await tools.runAsync(
+      `${cmd} bigquery integration_tests_dlp harmless `,
+      cwd
+    );
+    t.is(output, 'No findings.');
+  }
+);
+
+test.serial(`should report Bigquery table handling errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} bigquery integration_tests_dlp harmless -t BAD_TYPE`,
+    cwd
+  );
   t.regex(output, /Error in inspectBigquery/);
 });
 
 // CLI options
-test(`should have a minLikelihood option`, async (t) => {
-  const promiseA = tools.runAsync(`${cmd} string "My phone number is (123) 456-7890." -m POSSIBLE`, cwd);
-  const promiseB = tools.runAsync(`${cmd} string "My phone number is (123) 456-7890." -m UNLIKELY`, cwd);
+test(`should have a minLikelihood option`, async t => {
+  const promiseA = tools.runAsync(
+    `${cmd} string "My phone number is (123) 456-7890." -m POSSIBLE`,
+    cwd
+  );
+  const promiseB = tools.runAsync(
+    `${cmd} string "My phone number is (123) 456-7890." -m UNLIKELY`,
+    cwd
+  );
 
   const outputA = await promiseA;
   t.truthy(outputA);
@@ -156,9 +231,15 @@ test(`should have a minLikelihood option`, async (t) => {
   t.regex(outputB, /PHONE_NUMBER/);
 });
 
-test(`should have a maxFindings option`, async (t) => {
-  const promiseA = tools.runAsync(`${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -f 1`, cwd);
-  const promiseB = tools.runAsync(`${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -f 2`, cwd);
+test(`should have a maxFindings option`, async t => {
+  const promiseA = tools.runAsync(
+    `${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -f 1`,
+    cwd
+  );
+  const promiseB = tools.runAsync(
+    `${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -f 2`,
+    cwd
+  );
 
   const outputA = await promiseA;
   t.not(outputA.includes('PHONE_NUMBER'), outputA.includes('EMAIL_ADDRESS')); // Exactly one of these should be included
@@ -168,9 +249,15 @@ test(`should have a maxFindings option`, async (t) => {
   t.regex(outputB, /EMAIL_ADDRESS/);
 });
 
-test(`should have an option to include quotes`, async (t) => {
-  const promiseA = tools.runAsync(`${cmd} string "My phone number is (223) 456-7890." -q false`, cwd);
-  const promiseB = tools.runAsync(`${cmd} string "My phone number is (223) 456-7890."`, cwd);
+test(`should have an option to include quotes`, async t => {
+  const promiseA = tools.runAsync(
+    `${cmd} string "My phone number is (223) 456-7890." -q false`,
+    cwd
+  );
+  const promiseB = tools.runAsync(
+    `${cmd} string "My phone number is (223) 456-7890."`,
+    cwd
+  );
 
   const outputA = await promiseA;
   t.truthy(outputA);
@@ -180,9 +267,15 @@ test(`should have an option to include quotes`, async (t) => {
   t.regex(outputB, /\(223\) 456-7890/);
 });
 
-test(`should have an option to filter results by infoType`, async (t) => {
-  const promiseA = tools.runAsync(`${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890."`, cwd);
-  const promiseB = tools.runAsync(`${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -t PHONE_NUMBER`, cwd);
+test(`should have an option to filter results by infoType`, async t => {
+  const promiseA = tools.runAsync(
+    `${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890."`,
+    cwd
+  );
+  const promiseB = tools.runAsync(
+    `${cmd} string "My email is gary@example.com and my phone number is (223) 456-7890." -t PHONE_NUMBER`,
+    cwd
+  );
 
   const outputA = await promiseA;
   t.regex(outputA, /EMAIL_ADDRESS/);
diff --git a/dlp/system-test/metadata.test.js b/dlp/system-test/metadata.test.js
index 5f088a4620..cabb1205fd 100644
--- a/dlp/system-test/metadata.test.js
+++ b/dlp/system-test/metadata.test.js
@@ -24,13 +24,13 @@ const cwd = path.join(__dirname, `..`);
 
 test.before(tools.checkCredentials);
 
-test(`should list info types for a given category`, async (t) => {
+test(`should list info types for a given category`, async t => {
   const output = await tools.runAsync(`${cmd} infoTypes GOVERNMENT`, cwd);
   t.regex(output, /US_DRIVERS_LICENSE_NUMBER/);
   t.false(output.includes('AMERICAN_BANKERS_CUSIP_ID'));
 });
 
-test(`should inspect categories`, async (t) => {
+test(`should inspect categories`, async t => {
   const output = await tools.runAsync(`${cmd} categories`, cwd);
   t.regex(output, /FINANCE/);
 });
diff --git a/dlp/system-test/quickstart.test.js b/dlp/system-test/quickstart.test.js
index ce0a1a74ae..262fe0f5b6 100644
--- a/dlp/system-test/quickstart.test.js
+++ b/dlp/system-test/quickstart.test.js
@@ -24,7 +24,7 @@ const cwd = path.join(__dirname, `..`);
 
 test.before(tools.checkCredentials);
 
-test(`should run`, async (t) => {
+test(`should run`, async t => {
   const output = await tools.runAsync(cmd, cwd);
   t.regex(output, /Info type: US_MALE_NAME/);
 });
diff --git a/dlp/system-test/redact.test.js b/dlp/system-test/redact.test.js
index 13c60e2cb3..69bba49d7f 100644
--- a/dlp/system-test/redact.test.js
+++ b/dlp/system-test/redact.test.js
@@ -29,53 +29,89 @@ const testResourcePath = 'system-test/resources';
 
 test.before(tools.checkCredentials);
 
 // redact_string
-test(`should redact multiple sensitive data types from a string`, async (t) => {
-  const output = await tools.runAsync(`${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t US_MALE_NAME PHONE_NUMBER`, cwd);
+test(`should redact multiple sensitive data types from a string`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t US_MALE_NAME PHONE_NUMBER`,
+    cwd
+  );
   t.is(output, 'I am REDACTED and my phone number is REDACTED.');
 });
 
-test(`should redact a single sensitive data type from a string`, async (t) => {
-  const output = await tools.runAsync(`${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER`, cwd);
+test(`should redact a single sensitive data type from a string`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} string "I am Gary and my phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER`,
+    cwd
+  );
   t.is(output, 'I am Gary and my phone number is REDACTED.');
 });
 
-test(`should report string redaction handling errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} string "My name is Gary and my phone number is (123) 456-7890." REDACTED -t BAD_TYPE`, cwd);
+test(`should report string redaction handling errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} string "My name is Gary and my phone number is (123) 456-7890." REDACTED -t BAD_TYPE`,
+    cwd
+  );
   t.regex(output, /Error in redactString/);
 });
 
 // redact_image
-test(`should redact a single sensitive data type from an image`, async (t) => {
+test(`should redact a single sensitive data type from an image`, async t => {
   const testName = `redact-multiple-types`;
-  const output = await tools.runAsync(`${cmd} image ${testImage} ${testName}.result.png -t PHONE_NUMBER EMAIL_ADDRESS`, cwd);
-
-  t.true(output.includes(`Saved image redaction results to path: ${testName}.result.png`));
-
-  const correct = fs.readFileSync(`${testResourcePath}/${testName}.correct.png`);
+  const output = await tools.runAsync(
+    `${cmd} image ${testImage} ${testName}.result.png -t PHONE_NUMBER EMAIL_ADDRESS`,
+    cwd
+  );
+
+  t.true(
+    output.includes(
+      `Saved image redaction results to path: ${testName}.result.png`
+    )
+  );
+
+  const correct = fs.readFileSync(
+    `${testResourcePath}/${testName}.correct.png`
+  );
   const result = fs.readFileSync(`${testName}.result.png`);
   t.deepEqual(correct, result);
 });
 
-test(`should redact multiple sensitive data types from an image`, async (t) => {
+test(`should redact multiple sensitive data types from an image`, async t => {
   const testName = `redact-single-type`;
-  const output = await tools.runAsync(`${cmd} image ${testImage} ${testName}.result.png -t PHONE_NUMBER`, cwd);
-
-  t.true(output.includes(`Saved image redaction results to path: ${testName}.result.png`));
-
-  const correct = fs.readFileSync(`${testResourcePath}/${testName}.correct.png`);
+  const output = await tools.runAsync(
+    `${cmd} image ${testImage} ${testName}.result.png -t PHONE_NUMBER`,
+    cwd
+  );
+
+  t.true(
+    output.includes(
+      `Saved image redaction results to path: ${testName}.result.png`
+    )
+  );
+
+  const correct = fs.readFileSync(
+    `${testResourcePath}/${testName}.correct.png`
+  );
   const result = fs.readFileSync(`${testName}.result.png`);
   t.deepEqual(correct, result);
 });
 
-test(`should report image redaction handling errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} image ${testImage} nonexistent.result.png -t BAD_TYPE`, cwd);
+test(`should report image redaction handling errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} image ${testImage} nonexistent.result.png -t BAD_TYPE`,
+    cwd
+  );
   t.regex(output, /Error in redactImage/);
 });
 
 // CLI options
-test(`should have a minLikelihood option`, async (t) => {
-  const promiseA = tools.runAsync(`${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m VERY_LIKELY`, cwd);
-  const promiseB = tools.runAsync(`${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m UNLIKELY`, cwd);
+test(`should have a minLikelihood option`, async t => {
+  const promiseA = tools.runAsync(
+    `${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m VERY_LIKELY`,
+    cwd
+  );
+  const promiseB = tools.runAsync(
+    `${cmd} string "My phone number is (123) 456-7890." REDACTED -t PHONE_NUMBER -m UNLIKELY`,
+    cwd
+  );
 
   const outputA = await promiseA;
   t.is(outputA, 'My phone number is (123) 456-7890.');
diff --git a/dlp/system-test/risk.test.js b/dlp/system-test/risk.test.js
index 8481ad911e..91a24dd83a 100644
--- a/dlp/system-test/risk.test.js
+++ b/dlp/system-test/risk.test.js
@@ -30,67 +30,100 @@ const numericField = 'Age';
 
 test.before(tools.checkCredentials);
 
 // numericalRiskAnalysis
-test(`should perform numerical risk analysis`, async (t) => {
-  const output = await tools.runAsync(`${cmd} numerical ${dataset} harmful ${numericField}`, cwd);
+test(`should perform numerical risk analysis`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} numerical ${dataset} harmful ${numericField}`,
+    cwd
+  );
   t.regex(output, /Value at 0% quantile: \d{2}/);
   t.regex(output, /Value at \d{2}% quantile: \d{2}/);
 });
 
-test(`should handle numerical risk analysis errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} numerical ${dataset} nonexistent ${numericField}`, cwd);
+test(`should handle numerical risk analysis errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} numerical ${dataset} nonexistent ${numericField}`,
+    cwd
+  );
   t.regex(output, /Error in numericalRiskAnalysis/);
 });
 
 // categoricalRiskAnalysis
-test(`should perform categorical risk analysis on a string field`, async (t) => {
-  const output = await tools.runAsync(`${cmd} categorical ${dataset} harmful ${uniqueField}`, cwd);
+test(`should perform categorical risk analysis on a string field`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} categorical ${dataset} harmful ${uniqueField}`,
+    cwd
+  );
   t.regex(output, /Most common value occurs \d time\(s\)/);
 });
 
-test(`should perform categorical risk analysis on a number field`, async (t) => {
-  const output = await tools.runAsync(`${cmd} categorical ${dataset} harmful ${numericField}`, cwd);
+test(`should perform categorical risk analysis on a number field`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} categorical ${dataset} harmful ${numericField}`,
+    cwd
+  );
   t.regex(output, /Most common value occurs \d time\(s\)/);
 });
 
-test(`should handle categorical risk analysis errors`, async (t) => {
-  const output = await tools.runAsync(`${cmd} categorical ${dataset} nonexistent ${uniqueField}`, cwd);
+test(`should handle categorical risk analysis errors`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} categorical ${dataset} nonexistent ${uniqueField}`,
+    cwd
+  );
   t.regex(output, /Error in categoricalRiskAnalysis/);
 });
 
 // kAnonymityAnalysis
-test(`should perform k-anonymity analysis on a single field`, async (t) => {
-  const output = await tools.runAsync(`${cmd} kAnonymity ${dataset} harmful ${numericField}`, cwd);
+test(`should perform k-anonymity analysis on a single field`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} kAnonymity ${dataset} harmful ${numericField}`,
+    cwd
+  );
   t.regex(output, /Quasi-ID values: \{\d{2}\}/);
   t.regex(output, /Class size: \d/);
 });
 
-test(`should perform k-anonymity analysis on multiple fields`, async (t) => {
-  const output = await tools.runAsync(`${cmd} kAnonymity ${dataset} harmful ${numericField} ${repeatedField}`, cwd);
+test(`should perform k-anonymity analysis on multiple fields`, async t => {
+  const output = await tools.runAsync(
+    `${cmd} kAnonymity ${dataset} harmful ${numericField} ${repeatedField}`,
+    cwd
+  );
   t.regex(output, /Quasi-ID values: \{\d{2}, \d{4} \d{4} \d{4} \d{4}\}/);
   t.regex(output, /Class size: \d/);
 });
 
-test(`should handle k-anonymity analysis errors`, async (t) => { - const output = await tools.runAsync(`${cmd} kAnonymity ${dataset} nonexistent ${numericField}`, cwd); +test(`should handle k-anonymity analysis errors`, async t => { + const output = await tools.runAsync( + `${cmd} kAnonymity ${dataset} nonexistent ${numericField}`, + cwd + ); t.regex(output, /Error in kAnonymityAnalysis/); }); // lDiversityAnalysis -test(`should perform l-diversity analysis on a single field`, async (t) => { - const output = await tools.runAsync(`${cmd} lDiversity ${dataset} harmful ${uniqueField} ${numericField}`, cwd); +test(`should perform l-diversity analysis on a single field`, async t => { + const output = await tools.runAsync( + `${cmd} lDiversity ${dataset} harmful ${uniqueField} ${numericField}`, + cwd + ); t.regex(output, /Quasi-ID values: \{\d{2}\}/); t.regex(output, /Class size: \d/); t.regex(output, /Sensitive value James occurs \d time\(s\)/); }); -test(`should perform l-diversity analysis on multiple fields`, async (t) => { - const output = await tools.runAsync(`${cmd} lDiversity ${dataset} harmful ${uniqueField} ${numericField} ${repeatedField}`, cwd); +test(`should perform l-diversity analysis on multiple fields`, async t => { + const output = await tools.runAsync( + `${cmd} lDiversity ${dataset} harmful ${uniqueField} ${numericField} ${repeatedField}`, + cwd + ); t.regex(output, /Quasi-ID values: \{\d{2}, \d{4} \d{4} \d{4} \d{4}\}/); t.regex(output, /Class size: \d/); t.regex(output, /Sensitive value James occurs \d time\(s\)/); }); -test(`should handle l-diversity analysis errors`, async (t) => { - const output = await tools.runAsync(`${cmd} lDiversity ${dataset} nonexistent ${uniqueField} ${numericField}`, cwd); +test(`should handle l-diversity analysis errors`, async t => { + const output = await tools.runAsync( + `${cmd} lDiversity ${dataset} nonexistent ${uniqueField} ${numericField}`, + cwd + ); t.regex(output, /Error in lDiversityAnalysis/); });