This repository has been archived by the owner on Jul 20, 2023. It is now read-only.

chore: modernize the samples and sample tests #297

Merged 1 commit on Dec 18, 2018
10 changes: 0 additions & 10 deletions greenkeeper.json

This file was deleted.

6 changes: 0 additions & 6 deletions samples/automl/.eslintrc.yml

This file was deleted.

2 changes: 1 addition & 1 deletion samples/automl/automlVisionDataset.js
100755 → 100644
@@ -266,7 +266,7 @@ require(`yargs`) // eslint-disable-line
computeRegion: {
alias: `c`,
type: `string`,
- default: process.env.REGION_NAME,
+ default: 'us-central1',
requiresArg: true,
description: `region name e.g. "us-central1"`,
},
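For context, this option is part of the yargs CLI definition shared by the three automl samples. A minimal sketch of how the modernized option reads, assuming it sits inside a standard yargs .options() block as in the samples; the surrounding .command() calls are omitted and the trailing console.log is illustrative only:

const argv = require(`yargs`) // eslint-disable-line
  .options({
    computeRegion: {
      alias: `c`,
      type: `string`,
      default: 'us-central1', // previously read from process.env.REGION_NAME
      requiresArg: true,
      description: `region name e.g. "us-central1"`,
    },
  })
  .help().argv;

console.log(`Using compute region: ${argv.computeRegion}`);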
2 changes: 1 addition & 1 deletion samples/automl/automlVisionModel.js
100755 → 100644
@@ -428,7 +428,7 @@ require(`yargs`) // eslint-disable-line
computeRegion: {
alias: `c`,
type: `string`,
- default: process.env.REGION_NAME,
+ default: 'us-central1',
requiresArg: true,
description: `region name e.g. "us-central1"`,
},
2 changes: 1 addition & 1 deletion samples/automl/automlVisionPredict.js
100755 → 100644
@@ -83,7 +83,7 @@ require(`yargs`) // eslint-disable-line
computeRegion: {
alias: `c`,
type: `string`,
- default: process.env.REGION_NAME,
+ default: 'us-central1',
requiresArg: true,
description: `region name e.g. "us-central1"`,
},
27 changes: 0 additions & 27 deletions samples/automl/package.json

This file was deleted.

3 changes: 0 additions & 3 deletions samples/automl/system-test/.eslintrc.yml

This file was deleted.

128 changes: 50 additions & 78 deletions samples/faceDetection.js
@@ -33,105 +33,77 @@ const fs = require('fs');
* Uses the Vision API to detect faces in the given file.
*/
// [START vision_face_detection_tutorial_send_request]
- function detectFaces(inputFile, callback) {
+ async function detectFaces(inputFile) {
// Make a call to the Vision API to detect the faces
const request = {image: {source: {filename: inputFile}}};
- client
- .faceDetection(request)
- .then(results => {
- const faces = results[0].faceAnnotations;
- const numFaces = faces.length;
- console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
- callback(null, faces);
- })
- .catch(err => {
- console.error('ERROR:', err);
- callback(err);
- });
+ const results = await client.faceDetection(request);
+ const faces = results[0].faceAnnotations;
+ const numFaces = faces.length;
+ console.log(`Found ${numFaces} face${numFaces === 1 ? '' : 's'}.`);
+ return faces;
}
// [END vision_face_detection_tutorial_send_request]

/**
* Draws a polygon around the faces, then saves to outputFile.
*/
// [START vision_face_detection_tutorial_process_response]
- function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
- fs.readFile(inputFile, (err, image) => {
- if (err) {
- return callback(err);
- }
-
- const Image = Canvas.Image;
- // Open the original image into a canvas
- const img = new Image();
- img.src = image;
- const canvas = new Canvas.Canvas(img.width, img.height);
- const context = canvas.getContext('2d');
- context.drawImage(img, 0, 0, img.width, img.height);
+ async function highlightFaces(inputFile, faces, outputFile, Canvas) {
+ const {promisify} = require('util');
+ const readFile = promisify(fs.readFile);
+ const image = await readFile(inputFile);
+ const Image = Canvas.Image;
+ // Open the original image into a canvas
+ const img = new Image();
+ img.src = image;
+ const canvas = new Canvas.Canvas(img.width, img.height);
+ const context = canvas.getContext('2d');
+ context.drawImage(img, 0, 0, img.width, img.height);

- // Now draw boxes around all the faces
- context.strokeStyle = 'rgba(0,255,0,0.8)';
- context.lineWidth = '5';
+ // Now draw boxes around all the faces
+ context.strokeStyle = 'rgba(0,255,0,0.8)';
+ context.lineWidth = '5';

- faces.forEach(face => {
- context.beginPath();
- let origX = 0;
- let origY = 0;
- face.boundingPoly.vertices.forEach((bounds, i) => {
- if (i === 0) {
- origX = bounds.x;
- origY = bounds.y;
- }
- context.lineTo(bounds.x, bounds.y);
- });
- context.lineTo(origX, origY);
- context.stroke();
+ faces.forEach(face => {
+ context.beginPath();
+ let origX = 0;
+ let origY = 0;
+ face.boundingPoly.vertices.forEach((bounds, i) => {
+ if (i === 0) {
+ origX = bounds.x;
+ origY = bounds.y;
+ }
+ context.lineTo(bounds.x, bounds.y);
+ });
+ context.lineTo(origX, origY);
+ context.stroke();
});

- // Write the result to a file
- console.log('Writing to file ' + outputFile);
- const writeStream = fs.createWriteStream(outputFile);
- const pngStream = canvas.pngStream();
+ // Write the result to a file
+ console.log(`Writing to file ${outputFile}`);
+ const writeStream = fs.createWriteStream(outputFile);
+ const pngStream = canvas.pngStream();

- pngStream.on('data', chunk => {
- writeStream.write(chunk);
- });
- pngStream.on('error', console.log);
- pngStream.on('end', callback);
+ await new Promise((resolve, reject) => {
+ pngStream
+ .on('data', chunk => writeStream.write(chunk))
+ .on('error', reject)
+ .on('end', resolve);
});
}
// [END vision_face_detection_tutorial_process_response]

// Run the example
// [START vision_face_detection_tutorial_run_application]
- function main(inputFile, outputFile, Canvas, callback) {
+ async function main(inputFile, outputFile) {
+ const Canvas = require('canvas');
outputFile = outputFile || 'out.png';
- detectFaces(inputFile, (err, faces) => {
- if (err) {
- return callback(err);
- }
-
- console.log('Highlighting...');
- highlightFaces(inputFile, faces, outputFile, Canvas, err => {
- if (err) {
- return callback(err);
- }
- console.log('Finished!');
- callback(null, faces);
- });
- });
+ const faces = await detectFaces(inputFile);
+ console.log('Highlighting...');
+ await highlightFaces(inputFile, faces, outputFile, Canvas);
+ console.log('Finished!');
}
// [END vision_face_detection_tutorial_run_application]

- exports.main = main;
-
- if (module === require.main) {
- if (process.argv.length < 3) {
- console.log('Usage: node faceDetection <inputFile> [outputFile]');
- // eslint-disable-next-line no-process-exit
- process.exit(1);
- }
- const inputFile = process.argv[2];
- const outputFile = process.argv[3];
- exports.main(inputFile, outputFile, require('canvas'), console.log);
- }
+ const args = process.argv.slice(2);
+ main(...args).catch(console.error);
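The modernized highlightFaces leans on two patterns: turning fs.readFile into a promise with util.promisify, and wrapping the PNG stream's lifecycle in a Promise so the caller can await completion instead of passing a callback. A self-contained sketch of the same idea, independent of the Vision sample; the file names here are placeholders, not part of the PR:

const fs = require('fs');
const {promisify} = require('util');
const readFile = promisify(fs.readFile);

async function copyPng(inputFile, outputFile) {
  // promisify converts the callback-style fs.readFile into a Promise we can await.
  const data = await readFile(inputFile);
  // Wrap the write stream's events in a Promise so completion can be awaited.
  await new Promise((resolve, reject) => {
    fs.createWriteStream(outputFile)
      .on('error', reject)
      .on('finish', resolve)
      .end(data);
  });
}

copyPng('in.png', 'out.png').catch(console.error);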
15 changes: 8 additions & 7 deletions samples/package.json
@@ -1,30 +1,31 @@
{
"name": "nodejs-docs-samples-vision",
"version": "0.0.1",
"private": true,
"license": "Apache-2.0",
"author": "Google Inc.",
"engines": {
"node": ">=8"
},
"files": [
"*.js"
],
"scripts": {
"test": "mocha system-test/*.test.js --timeout 600000"
"test": "mocha system-test --timeout 600000"
},
"dependencies": {
"@google-cloud/automl": "^0.1.1",
"@google-cloud/automl": "^0.1.3",
"@google-cloud/vision": "^0.23.0",
"async": "^2.6.1",
"mathjs": "^5.0.4",
"natural": "^0.6.1",
"redis": "^2.8.0",
"yargs": "^12.0.0",
"canvas": "^2.0.0"
},
"devDependencies": {
"@google-cloud/nodejs-repo-tools": "^3.0.0",
"@google-cloud/storage": "^2.0.0",
"chai": "^4.2.0",
"execa": "^1.0.0",
"mocha": "^5.0.0",
"uuid": "^3.2.1"
},
"optionalDependencies": {}
}
}
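The new devDependencies (mocha, chai, execa) reflect how the modernized sample tests are written: each test shells out to a sample with execa and asserts on its stdout with chai. The test files themselves are not part of this excerpt, so the following is only a hypothetical sketch of that style; the file name, sample name, and expected output are placeholders:

// system-test/quickstart.test.js (illustrative only)
const {assert} = require('chai');
const execa = require('execa');

describe('quickstart sample', () => {
  it('prints labels for the bundled test image', async () => {
    // Run the sample as a child process and capture its stdout.
    const {stdout} = await execa('node', ['quickstart.js']);
    assert.include(stdout, 'Labels:');
  });
});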
6 changes: 0 additions & 6 deletions samples/productSearch/system-test/.eslintrc.yml

This file was deleted.

17 changes: 8 additions & 9 deletions samples/quickstart.js
@@ -16,20 +16,19 @@
'use strict';

// [START vision_quickstart]
- // Imports the Google Cloud client library
- const vision = require('@google-cloud/vision');
+ async function quickstart() {
+ // Imports the Google Cloud client library
+ const vision = require('@google-cloud/vision');

- // Creates a client
- const client = new vision.ImageAnnotatorClient();
+ // Creates a client
+ const client = new vision.ImageAnnotatorClient();

- // Performs label detection on the image file
- async function main() {
+ // Performs label detection on the image file
const [result] = await client.labelDetection('./resources/wakeupcat.jpg');
const labels = result.labelAnnotations;
console.log('Labels:');
labels.forEach(label => console.log(label.description));
}
- main().catch(err => {
- console.error('ERROR:', err);
- });
// [END vision_quickstart]

+ quickstart().catch(console.error);
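The quickstart now wraps its body in a single async function and attaches one .catch at the call site. The same shape carries over to the other annotator helpers; a small sketch assuming the same ImageAnnotatorClient and the request-object form already used by faceDetection.js above (the image path is the sample's bundled cat picture):

const vision = require('@google-cloud/vision');

async function labelImage(fileName) {
  const client = new vision.ImageAnnotatorClient();
  // The request shape mirrors the one used in faceDetection.js.
  const [result] = await client.labelDetection({
    image: {source: {filename: fileName}},
  });
  return result.labelAnnotations.map(label => label.description);
}

labelImage('./resources/wakeupcat.jpg')
  .then(labels => console.log('Labels:', labels.join(', ')))
  .catch(console.error);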
3 changes: 0 additions & 3 deletions samples/system-test/.eslintrc.yml
@@ -1,6 +1,3 @@
---
env:
mocha: true
- rules:
- node/no-unpublished-require: off
- no-empty: off