Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 4abc5f5

Browse files
authoredMay 6, 2021
Merge pull request #7 from exadel-inc/sdk-0.5.1-new-functionality
Sdk 0.5.1 new functionality
2 parents cc71d41 + 64ccb2e commit 4abc5f5

25 files changed

+12299
-81
lines changed
 

‎README.md

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ CompreFace JavaScript SDK makes face recognition into your application even easi
99
- [Initialization](#initialization)
1010
- [Adding faces into a face collection](#adding-faces-into-a-face-collection)
1111
- [Recognition](#recognition)
12+
- [Environments](#environments)
13+
- [Webcam demo](#webcam-demo)
1214
- [Reference](#reference)
1315
- [CompreFace Global Object](#compreface-global-object)
1416
- [Recognition Service](#recognition-service)
@@ -102,6 +104,17 @@ recognitionService.recognize(path_to_image)
102104
})
103105
```
104106

107+
### Environments
108+
NOTE: We provide 3 ways of uploading an image to our SDK: a URL, a Blob, and a relative path (on the local machine).
109+
110+
| Environments | from URL | with Blob format | from local machine|
111+
| ------------|--------- | ---------------- | ---------------- |
112+
| Browser ||||
113+
| Nodejs ||||
114+
115+
### Webcam demo
116+
[Documentation is here](/webcam_demo)
117+
105118
## Reference
106119

107120
### CompreFace Global Object
@@ -209,7 +222,7 @@ The first argument is the image location, it could be a URL or a path on the loc
209222

210223
| Argument | Type | Required | Notes |
211224
| --------------- | ------ | -------- | ----------------------------------------- |
212-
| image_location | string | required | URL or local machine path to the image you want to recognize |
225+
| image_location | string | required | URL, image in BLOB format or image from your local machine|
213226
| options | string | optional | Object that defines recognition options |
214227

215228
Supported options:
@@ -315,7 +328,7 @@ Adds an image to your face collection.
315328

316329
| Argument | Type | Required | Notes |
317330
| --------------- | ------ | -------- | ----------------------------------------- |
318-
| image_location | string | required | URL or local machine path to the image you want to add to face collection |
331+
| image_location | string | required | URL, image in BLOB format or image from your local machine |
319332
| subject | string | required | Name or any other person ID. It can be just a random string you generate and save for further identification |
320333
| options | string | optional | Object that defines adding options |
321334

@@ -481,7 +494,7 @@ Compares similarities of given image with image from your face collection.
481494

482495
| Argument | Type | Required | Notes |
483496
| --------------- | ------ | -------- | ----------------------------------------- |
484-
| image_location | string | required | URL or local machine path to the image you want to recognize |
497+
| image_location | string | required | URL, image in BLOB format or image from your local machine |
485498
| options | string | optional | Object that defines recognition options |
486499

487500
Supported options:
@@ -576,7 +589,7 @@ The first argument is the image location, it could be a URL or a path on the loc
576589

577590
| Argument | Type | Required | Notes |
578591
| --------------- | ------ | -------- | ----------------------------------------- |
579-
| image_location | string | required | URL or local machine path to the image you want to recognize |
592+
| image_location | string | required | URL, image in BLOB format or image from your local machine |
580593
| options | string | optional | Object that defines detection options |
581594

582595
Supported options:
@@ -667,8 +680,8 @@ The first two arguments are the image location, it could be a URL or a path on t
667680

668681
| Argument | Type | Required | Notes |
669682
| ---------------------- | ------ | -------- | ----------------------------------------- |
670-
| source_image_location | string | required | URL or local machine path to the source image you want to compare |
671-
| target_image_location | string | required | URL or local machine path to the target image you want to compare |
683+
| source_image_location | string | required | URL, source image in BLOB format or source image from your local machine |
684+
| target_image_location | string | required | URL, target image in BLOB format or target image from your local machine |
672685
| options | string | optional | Object that defines detection options |
673686

674687
Supported options:

‎endpoints/common_endpoints.js

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
/*
 * Copyright (c) 2020 the original author or authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
import axios from 'axios';
import FormData from 'form-data';

// Collection of common endpoints that are used by almost all services
const common_endpoints = {
    /**
     * Upload an image given as Blob data to a CompreFace endpoint.
     * @param {Blob} blobData - image content
     * @param {String} url - fully qualified endpoint URL
     * @param {String} api_key - service API key, sent as the x-api-key header
     * @returns {Promise} resolves with the full axios response, rejects with the axios error
     */
    async upload_blob(blobData, url, api_key) {
        const bodyFormData = new FormData();
        // The server only requires that the multipart part carries *a*
        // filename; the concrete extension is not significant here.
        bodyFormData.append('file', blobData, 'example.jpg');

        // NOTE(review): setting 'Content-Type' by hand omits the multipart
        // boundary. Browsers fill it in when FormData is native, but confirm
        // this path also works under Node with the 'form-data' package.
        //
        // Returning the axios promise directly is equivalent to the previous
        // `new Promise(async (resolve, reject) => { ... })` wrapper: the
        // resolved value is the response, rejections carry the error.
        return axios.post(url, bodyFormData, {
            headers: {
                'Content-Type': 'multipart/form-data',
                'x-api-key': api_key,
            },
        });
    }
}

export { common_endpoints }

‎endpoints/verification_endpoints.js

Lines changed: 158 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -100,61 +100,169 @@ const verification_endpoints = {
100100
*/
101101
one_url_request(source_image_path, isSourceImageUrl, target_image_path, url, api_key ){
102102
var bodyFormData = new FormData();
103+
let path_is_url = [];
104+
let path_is_relative = [];
103105

104106
if(isSourceImageUrl){
105-
bodyFormData.append('target_image', fs.createReadStream(target_image_path), { knownLength: fs.statSync(target_image_path).size });
106-
107-
return new Promise( async (resolve, reject) => {
108-
await axios.get(source_image_path, { responseType: 'stream' })
109-
.then( async (response) => {
110-
let image_extention = response.headers['content-type'].split("/")[1]
111-
bodyFormData.append('source_image', response.data, `example.${image_extention}`);
112-
113-
try {
114-
const res = await axios.post( url, bodyFormData, {
115-
headers: {
116-
...bodyFormData.getHeaders(),
117-
"x-api-key": api_key
118-
},
119-
})
120-
121-
resolve(res)
122-
} catch (error) {
123-
reject(error)
124-
}
125-
})
126-
.catch(error => {
107+
path_is_url[0] = "source_image";
108+
path_is_url[1] = source_image_path;
109+
110+
path_is_relative[0] = "target_image";
111+
path_is_relative[1] = target_image_path;
112+
}else{
113+
path_is_url = "target_image";
114+
path_is_url[1] = target_image_path;
115+
116+
path_is_relative = "source_image";
117+
path_is_relative[1] = source_image_path;
118+
}
119+
120+
bodyFormData.append(path_is_relative[0], fs.createReadStream(path_is_relative[1]), { knownLength: fs.statSync(target_image_path).size });
121+
122+
return new Promise( async (resolve, reject) => {
123+
await axios.get(path_is_url[1], { responseType: 'stream' })
124+
.then( async (response) => {
125+
let image_extention = response.headers['content-type'].split("/")[1]
126+
bodyFormData.append(path_is_url[0], response.data, `example.${image_extention}`);
127+
128+
try {
129+
const res = await axios.post( url, bodyFormData, {
130+
headers: {
131+
...bodyFormData.getHeaders(),
132+
"x-api-key": api_key
133+
},
134+
})
135+
136+
resolve(res)
137+
} catch (error) {
127138
reject(error)
128-
})
129-
})
130-
}else {
131-
bodyFormData.append('source_image', fs.createReadStream(source_image_path), { knownLength: fs.statSync(source_image_path).size });
132-
133-
return new Promise( async (resolve, reject) => {
134-
await axios.get(target_image_path, { responseType: 'stream' })
135-
.then( async (response) => {
136-
let image_extention = response.headers['content-type'].split("/")[1]
137-
bodyFormData.append('target_image', response.data, `example.${image_extention}`);
138-
139-
try {
140-
const res = await axios.post( url, bodyFormData, {
141-
headers: {
142-
...bodyFormData.getHeaders(),
143-
"x-api-key": api_key
144-
},
145-
})
146-
147-
resolve(res)
148-
} catch (error) {
149-
reject(error)
150-
}
151-
})
152-
.catch(error => {
139+
}
140+
})
141+
.catch(error => {
142+
reject(error)
143+
})
144+
})
145+
},
146+
/**
147+
* Verify face(s) from given blob data
148+
* @param {String} source_image_path
149+
* @param {String} target_image_path
150+
* @param {Boolean} isSourceBlob
151+
* @param {String} url
152+
* @param {String} api_key
153+
* @returns {Promise}
154+
*/
155+
url_blob_request(source_image_path, isSourceImageUrl, target_image_path, url, api_key){
156+
let bodyFormData = new FormData();
157+
let path_is_url = [];
158+
let path_is_blob = [];
159+
160+
if(isSourceImageUrl){
161+
path_is_url[0] = "source_image";
162+
path_is_url[1] = source_image_path;
163+
164+
path_is_blob[0] = "target_image";
165+
path_is_blob[1] = target_image_path;
166+
}else{
167+
path_is_url = "target_image";
168+
path_is_url[1] = target_image_path;
169+
170+
path_is_blob = "source_image";
171+
path_is_blob[1] = source_image_path;
172+
}
173+
bodyFormData.append(path_is_blob[0], path_is_blob[1], 'example.jpg');
174+
175+
return new Promise( async (resolve, reject) => {
176+
await axios.get(path_is_url[1], { responseType: 'stream' })
177+
.then( async (response) => {
178+
let image_extention = response.headers['content-type'].split("/")[1]
179+
bodyFormData.append(path_is_url[0], response.data, `example.${image_extention}`);
180+
181+
try {
182+
const res = await axios.post( url, bodyFormData, {
183+
headers: {
184+
...bodyFormData.getHeaders(),
185+
"x-api-key": api_key
186+
},
187+
})
188+
189+
resolve(res)
190+
} catch (error) {
153191
reject(error)
154-
})
155-
})
192+
}
193+
})
194+
.catch(error => {
195+
reject(error)
196+
})
197+
})
198+
},
199+
200+
/**
201+
* Both source and target images are blob
202+
* @param {Blob} source_image_blob
203+
* @param {Blob} target_image_blob
204+
* @param {String} url
205+
* @param {String} api_key
206+
*/
207+
both_blob_request(source_image_blob, target_image_blob, url, api_key){
208+
var bodyFormData = new FormData();
209+
210+
bodyFormData.append('source_image', source_image_blob, 'example.jpg');
211+
bodyFormData.append('target_image', target_image_blob, 'example1.jpg');
212+
213+
return new Promise( async (resolve, reject) => {
214+
try {
215+
const response = await axios.post( url, bodyFormData, {
216+
headers: {
217+
'Content-Type': 'multipart/form-data',
218+
"x-api-key": api_key
219+
},
220+
})
221+
222+
resolve(response)
223+
} catch (error) {
224+
reject(error)
225+
}
226+
})
227+
},
228+
229+
one_blob_request(source_image_path, isSourceImageBlob, target_image_path, url, api_key ){
230+
var bodyFormData = new FormData();
231+
let path_is_blob = [];
232+
let path_is_relative = [];
233+
234+
if(isSourceImageBlob){
235+
path_is_blob[0] = "source_image";
236+
path_is_blob[1] = source_image_path;
237+
238+
path_is_relative[0] = "target_image";
239+
path_is_relative[1] = target_image_path;
240+
}else{
241+
path_is_blob = "target_image";
242+
path_is_blob[1] = target_image_path;
243+
244+
path_is_relative = "source_image";
245+
path_is_relative[1] = source_image_path;
156246
}
157-
}
247+
248+
bodyFormData.append(path_is_relative[0], fs.createReadStream(path_is_relative[1]), { knownLength: fs.statSync(target_image_path).size });
249+
bodyFormData.append(path_is_blob[0], path_is_blob[1], 'example.jpg');
250+
251+
return new Promise( async (resolve, reject) => {
252+
try {
253+
const response = await axios.post( url, bodyFormData, {
254+
headers: {
255+
'Content-Type': 'multipart/form-data',
256+
"x-api-key": api_key
257+
},
258+
})
259+
260+
resolve(response)
261+
} catch (error) {
262+
reject(error)
263+
}
264+
})
265+
},
158266

159267

160268
}

‎package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@exadel/compreface-js-sdk",
3-
"version": "0.5.0",
3+
"version": "0.5.1",
44
"license": "Apache-2.0",
55
"description": "JavaScript SDK for CompreFace - free and open-source face recognition system from Exadel",
66
"main": "index.js",

‎services/detection_service.js

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
import { detection_endpoints } from '../endpoints/detection_endpoints.js';
1818
import { recognition_endpoints } from '../endpoints/recognition_endpoints.js';
19+
import { common_endpoints } from '../endpoints/common_endpoints.js';
1920
import { common_functions } from '../functions/index.js';
2021

2122
class DetectionService {
@@ -59,6 +60,14 @@ class DetectionService {
5960
.catch(error => {
6061
reject(error)
6162
})
63+
}else if(image_path instanceof Blob) {
64+
common_endpoints.upload_blob(image_path, url, this.key)
65+
.then(response => {
66+
resolve(response.data)
67+
})
68+
.catch(error => {
69+
reject(error)
70+
})
6271
}else {
6372
detection_endpoints.detect_request(image_path, url, this.key)
6473
.then(response => {

‎services/recognition_service.js

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
* permissions and limitations under the License.
1515
*/
1616
import { recognition_endpoints } from '../endpoints/recognition_endpoints.js';
17+
import { common_endpoints } from '../endpoints/common_endpoints.js';
1718
import { common_functions } from '../functions/index.js';
1819

1920
class RecognitionService {
@@ -59,6 +60,14 @@ class RecognitionService {
5960
.catch(error => {
6061
reject(error)
6162
})
63+
}else if(image_path instanceof Blob){
64+
common_endpoints.upload_blob(image_path, url, this.key)
65+
.then(response => {
66+
resolve(response.data)
67+
})
68+
.catch(error => {
69+
reject(error)
70+
})
6271
}else {
6372
recognition_endpoints.face_request(image_path, url, this.key)
6473
.then(response => {
@@ -126,6 +135,14 @@ class RecognitionService {
126135
.catch(error => {
127136
reject(error)
128137
})
138+
}else if(image_path instanceof Blob){
139+
common_endpoints.upload_blob(image_path, url, this.key)
140+
.then(response => {
141+
resolve(response.data)
142+
})
143+
.catch(error => {
144+
reject(error)
145+
})
129146
}else {
130147
recognition_endpoints.face_request(image_path, url, key)
131148
.then(response => {
@@ -170,6 +187,14 @@ class RecognitionService {
170187
.catch(error => {
171188
reject(error)
172189
})
190+
}else if(image_path instanceof Blob){
191+
common_endpoints.upload_blob(image_path, url, this.key)
192+
.then(response => {
193+
resolve(response.data)
194+
})
195+
.catch(error => {
196+
reject(error)
197+
})
173198
}else {
174199
recognition_endpoints.face_request(image_path, url, key)
175200
.then(response => {

‎services/verification_service.js

Lines changed: 81 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -44,31 +44,88 @@ class VerificationService {
4444
let isSourceImageUrl = isUrl(source_image_path);
4545
let isTargetImageUrl = isUrl(target_image_path);
4646

47+
let isSourceBlob = source_image_path instanceof Blob;
48+
let isTargetBlob = target_image_path instanceof Blob;
49+
4750
return new Promise((resolve, reject) => {
48-
if(isSourceImageUrl && isTargetImageUrl){
49-
verification_endpoints.both_url_request(source_image_path, target_image_path, url, this.key)
50-
.then(response => {
51-
resolve(response.data)
52-
})
53-
.catch(error => {
54-
reject(error)
55-
})
56-
}else if (!isSourceImageUrl && !isTargetImageUrl){
57-
verification_endpoints.verify_face_request(source_image_path, target_image_path, url, this.key)
58-
.then(response => {
59-
resolve(response.data)
60-
})
61-
.catch(error => {
62-
reject(error)
63-
})
64-
}else if(!isSourceImageUrl || !isTargetImageUrl){
65-
verification_endpoints.one_url_request(source_image_path, isSourceImageUrl, target_image_path, url, this.key)
66-
.then(response => {
67-
resolve(response.data)
68-
})
69-
.catch(error => {
70-
reject(error)
71-
})
51+
if(isSourceImageUrl){
52+
if(isTargetImageUrl){
53+
verification_endpoints.both_url_request(source_image_path, target_image_path, url, this.key)
54+
.then(response => {
55+
resolve(response.data)
56+
})
57+
.catch(error => {
58+
reject(error)
59+
})
60+
}else if(isTargetBlob){
61+
verification_endpoints.url_blob_request(source_image_path, isSourceImageUrl, target_image_path, url, this.key)
62+
.then(response => {
63+
resolve(response.data)
64+
})
65+
.catch(error => {
66+
reject(error)
67+
})
68+
}else {
69+
verification_endpoints.one_url_request(source_image_path, isSourceImageUrl, target_image_path, url, this.key)
70+
.then(response => {
71+
resolve(response.data)
72+
})
73+
.catch(error => {
74+
reject(error)
75+
})
76+
}
77+
}else if(isSourceBlob){
78+
if(isTargetImageUrl){
79+
verification_endpoints.url_blob_request(source_image_path, isSourceImageUrl, target_image_path, url, this.key)
80+
.then(response => {
81+
resolve(response.data)
82+
})
83+
.catch(error => {
84+
reject(error)
85+
})
86+
}else if(isTargetBlob){
87+
verification_endpoints.both_blob_request(source_image_path, target_image_path, url, this.key)
88+
.then(response => {
89+
resolve(response.data)
90+
})
91+
.catch(error => {
92+
reject(error)
93+
})
94+
}else {
95+
verification_endpoints.one_blob_request(source_image_path, isSourceBlob, target_image_path, url, this.key)
96+
.then(response => {
97+
resolve(response.data)
98+
})
99+
.catch(error => {
100+
reject(error)
101+
})
102+
}
103+
}else {
104+
if(isTargetImageUrl){
105+
verification_endpoints.one_url_request(source_image_path, isSourceImageUrl, target_image_path, url, this.key)
106+
.then(response => {
107+
resolve(response.data)
108+
})
109+
.catch(error => {
110+
reject(error)
111+
})
112+
}else if(isTargetBlob){
113+
verification_endpoints.one_blob_request(source_image_path, target_image_path, url, this.key)
114+
.then(response => {
115+
resolve(response.data)
116+
})
117+
.catch(error => {
118+
reject(error)
119+
})
120+
}else {
121+
verification_endpoints.verify_face_request(source_image_path, target_image_path, url, this.key)
122+
.then(response => {
123+
resolve(response.data)
124+
})
125+
.catch(error => {
126+
reject(error)
127+
})
128+
}
72129
}
73130
})
74131
}

‎webcam_demo/.gitignore

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2+
3+
# dependencies
4+
/node_modules
5+
/.pnp
6+
.pnp.js
7+
8+
# testing
9+
/coverage
10+
11+
# production
12+
/build
13+
14+
# misc
15+
.DS_Store
16+
.env.local
17+
.env.development.local
18+
.env.test.local
19+
.env.production.local
20+
21+
npm-debug.log*
22+
yarn-debug.log*
23+
yarn-error.log*

‎webcam_demo/README.md

Lines changed: 246 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,246 @@
1+
# Webcam Demo
2+
3+
In this documentation we show how to use our detection service with a webcam. **NOTE:** we have chosen React as it is one of today's most popular UI libraries.
4+
5+
1. Clone our repository
6+
2. Enter to ```webcam_demo``` folder and install packages
7+
8+
``` cd webcam_demo```
9+
10+
```npm install```
11+
12+
3. Change detection API key inside ```src > App.js``` line ```40```
13+
14+
4. Start project
15+
16+
```npm start```
17+
18+
5. Click ```video start``` button to start your webcamera
19+
20+
*OR follow below instructions to create project by yourself*
21+
22+
1. Install reactjs
23+
24+
```npx create-react-app compreface-demo```
25+
26+
2. Enter to project folder
27+
28+
```cd compreface-demo```
29+
30+
3. Install CompreFace SDK
31+
32+
```npm i @exadel/compreface-js-sdk```
33+
34+
4. Create your component and copy/paste the following code. NOTE: We used a functional component; the video tag connects to your webcam, and the canvas tags are used for drawing a square and some extra data.
35+
36+
```
37+
import { useRef } from 'react'
38+
import { CompreFace } from '@exadel/compreface-js-sdk';
39+
40+
function App() {
41+
const videoTag = useRef(null);
42+
const canvas1 = useRef(null);
43+
const canvas2 = useRef(null);
44+
const canvas3 = useRef(null);
45+
46+
const handleVideoStart = () => {
47+
console.log("Click is working")
48+
}
49+
50+
return (
51+
<div>
52+
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
53+
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
54+
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
55+
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
56+
57+
<div>
58+
<button onClick={handleVideoStart}>Start video</button>
59+
</div>
60+
</div>
61+
);
62+
}
63+
64+
export default App;
65+
```
66+
67+
5. Add ability to start webcamera when user clicks "Start video" button. Put following code into ```handleVideoStart()``` function. ```Navigator.mediaDevices``` is built in read-only property of browser which enables user to access webcamera.
68+
69+
```
70+
navigator.mediaDevices.getUserMedia({ video: true})
71+
.then(stream => videoTag.current.srcObject = stream)
72+
.catch( error => console.error(error) )
73+
```
74+
75+
6. Initialize CompreFace instances and catch video event which fired when webcamera starts working. Your code should look like as following example. ```Play``` event listener fires when webcamera starts working and this is place where we need to use CompreFace SDK. NOTE: ```next_frame``` custom event created in order to create kind of recursion effect when we drawing square on face.
76+
```
77+
import { useRef } from 'react'
78+
import { CompreFace } from '@exadel/compreface-js-sdk';
79+
80+
function App() {
81+
const videoTag = useRef(null);
82+
const canvas1 = useRef(null);
83+
const canvas2 = useRef(null);
84+
const canvas3 = useRef(null);
85+
86+
const handleVideoStart = () => {
87+
navigator.mediaDevices.getUserMedia({ video: true})
88+
.then(stream => videoTag.current.srcObject = stream)
89+
.catch( error => console.error(error) )
90+
91+
videoTag.current.addEventListener('play', () => {
92+
// CompreFace init
93+
let server = "http://localhost";
94+
let port = 8000;
95+
let detection_key = "your_api_key_for_detection_service";
96+
97+
let core = new CompreFace(server, port);
98+
let detection_service = core.initFaceDetectionService(detection_key);
99+
// end of CompreFace init
100+
101+
let ctx1 = canvas1.current.getContext('2d');
102+
let ctx2 = canvas2.current.getContext('2d');
103+
let ctx3 = canvas3.current.getContext("2d");
104+
105+
document.addEventListener('next_frame', () => {
106+
ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
107+
canvas1.current.toBlob( blob => {
108+
detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
109+
.then(res => {
110+
/**
111+
112+
We need call draw function which draws square on face of user in front of webcamera
113+
114+
*/
115+
})
116+
.catch(error => console.log(error))
117+
}, 'image/jpeg', 0.95)
118+
})
119+
120+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
121+
document.dispatchEvent(evt);
122+
})
123+
}
124+
125+
return (
126+
<div>
127+
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
128+
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
129+
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
130+
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
131+
132+
<div>
133+
<button onClick={handleVideoStart}>Start video</button>
134+
</div>
135+
</div>
136+
);
137+
}
138+
139+
export default App;
140+
```
141+
142+
7. Add a draw function. NOTE: You can add extra canvas elements which show extra info related to the detected face.
143+
144+
```
145+
const drawFace = (canvasElement, faceData, extraCanvas) => {
146+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
147+
document.dispatchEvent(evt);
148+
let box = faceData.result[0].box;
149+
150+
canvasElement.clearRect(0, 0, 640, 480);
151+
extraCanvas.clearRect(0, 0, 640, 480);
152+
153+
canvasElement.strokeStyle = 'green';
154+
extraCanvas.strokeStyle = "blue";
155+
extraCanvas.fillStyle = "white"
156+
157+
extraCanvas.lineWidth = 5;
158+
canvasElement.lineWidth = 5;
159+
160+
canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
161+
extraCanvas.fillText( Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' ' + faceData.result[0].age[0] + '-' + faceData.result[0].age[1], box.x_min, box.y_min - 10)
162+
}
163+
```
164+
165+
8. Final code should look like this.
166+
167+
```
168+
import { useRef } from 'react'
169+
import { CompreFace } from '@exadel/compreface-js-sdk';
170+
171+
function App() {
172+
const videoTag = useRef(null);
173+
const canvas1 = useRef(null);
174+
const canvas2 = useRef(null);
175+
const canvas3 = useRef(null);
176+
177+
const drawFace = (canvasElement, faceData, extraCanvas) => {
178+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
179+
document.dispatchEvent(evt);
180+
let box = faceData.result[0].box;
181+
182+
canvasElement.clearRect(0, 0, 640, 480);
183+
extraCanvas.clearRect(0, 0, 640, 480);
184+
185+
canvasElement.strokeStyle = 'green';
186+
extraCanvas.strokeStyle = "blue";
187+
extraCanvas.fillStyle = "white"
188+
189+
extraCanvas.lineWidth = 5;
190+
canvasElement.lineWidth = 5;
191+
192+
canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
193+
extraCanvas.fillText( Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' ' + faceData.result[0].age[0] + '-' + faceData.result[0].age[1], box.x_min, box.y_min - 10)
194+
}
195+
196+
const handleVideoStart = () => {
197+
navigator.mediaDevices.getUserMedia({ video: true})
198+
.then(stream => videoTag.current.srcObject = stream)
199+
.catch( error => console.error(error) )
200+
201+
videoTag.current.addEventListener('play', () => {
202+
// CompreFace init
203+
let server = "http://localhost";
204+
let port = 8000;
205+
let detection_key = "your_api_key_for_detection_service";
206+
207+
let core = new CompreFace(server, port);
208+
let detection_service = core.initFaceDetectionService(detection_key);
209+
// end of CompreFace init
210+
211+
let ctx1 = canvas1.current.getContext('2d');
212+
let ctx2 = canvas2.current.getContext('2d');
213+
let ctx3 = canvas3.current.getContext("2d");
214+
215+
document.addEventListener('next_frame', () => {
216+
ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
217+
canvas1.current.toBlob( blob => {
218+
detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
219+
.then(res => {
220+
drawFace(ctx2, res, ctx3)
221+
})
222+
.catch(error => console.log(error))
223+
}, 'image/jpeg', 0.95)
224+
})
225+
226+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
227+
document.dispatchEvent(evt);
228+
})
229+
}
230+
231+
return (
232+
<div>
233+
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
234+
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
235+
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
236+
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
237+
238+
<div>
239+
<button onClick={handleVideoStart}>Start video</button>
240+
</div>
241+
</div>
242+
);
243+
}
244+
245+
export default App;
246+
```

‎webcam_demo/package.json

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
{
2+
"name": "webcam-tutorial",
3+
"version": "0.1.0",
4+
"private": true,
5+
"dependencies": {
6+
"@exadel/compreface-js-sdk": "^0.5.0",
7+
"@testing-library/jest-dom": "^5.11.4",
8+
"@testing-library/react": "^11.1.0",
9+
"@testing-library/user-event": "^12.1.10",
10+
"compreface-javascript-sdk": "^1.0.1",
11+
"react": "^17.0.2",
12+
"react-dom": "^17.0.2",
13+
"react-scripts": "4.0.3",
14+
"web-vitals": "^1.0.1"
15+
},
16+
"scripts": {
17+
"start": "react-scripts start",
18+
"build": "react-scripts build",
19+
"test": "react-scripts test",
20+
"eject": "react-scripts eject"
21+
},
22+
"eslintConfig": {
23+
"extends": [
24+
"react-app",
25+
"react-app/jest"
26+
]
27+
},
28+
"browserslist": {
29+
"production": [
30+
">0.2%",
31+
"not dead",
32+
"not op_mini all"
33+
],
34+
"development": [
35+
"last 1 chrome version",
36+
"last 1 firefox version",
37+
"last 1 safari version"
38+
]
39+
}
40+
}

‎webcam_demo/public/favicon.ico

3.78 KB
Binary file not shown.

‎webcam_demo/public/index.html

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
<!DOCTYPE html>
2+
<html lang="en">
3+
<head>
4+
<meta charset="utf-8" />
5+
<link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
6+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7+
<meta name="theme-color" content="#000000" />
8+
<meta
9+
name="description"
10+
content="Web site created using create-react-app"
11+
/>
12+
<link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
13+
<!--
14+
manifest.json provides metadata used when your web app is installed on a
15+
user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
16+
-->
17+
<link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
18+
<!--
19+
Notice the use of %PUBLIC_URL% in the tags above.
20+
It will be replaced with the URL of the `public` folder during the build.
21+
Only files inside the `public` folder can be referenced from the HTML.
22+
23+
Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
24+
work correctly both with client-side routing and a non-root public URL.
25+
Learn how to configure a non-root public URL by running `npm run build`.
26+
-->
27+
<title>React App</title>
28+
</head>
29+
<body>
30+
<noscript>You need to enable JavaScript to run this app.</noscript>
31+
<div id="root"></div>
32+
<!--
33+
This HTML file is a template.
34+
If you open it directly in the browser, you will see an empty page.
35+
36+
You can add webfonts, meta tags, or analytics to this file.
37+
The build step will place the bundled scripts into the <body> tag.
38+
39+
To begin the development, run `npm start` or `yarn start`.
40+
To create a production bundle, use `npm run build` or `yarn build`.
41+
-->
42+
</body>
43+
</html>

‎webcam_demo/public/logo192.png

5.22 KB
Loading

‎webcam_demo/public/logo512.png

9.44 KB
Loading

‎webcam_demo/public/manifest.json

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
{
2+
"short_name": "CompreFace Demo",
3+
"name": "CompreFace Webcam Demo",
4+
"icons": [
5+
{
6+
"src": "favicon.ico",
7+
"sizes": "64x64 32x32 24x24 16x16",
8+
"type": "image/x-icon"
9+
},
10+
{
11+
"src": "logo192.png",
12+
"type": "image/png",
13+
"sizes": "192x192"
14+
},
15+
{
16+
"src": "logo512.png",
17+
"type": "image/png",
18+
"sizes": "512x512"
19+
}
20+
],
21+
"start_url": ".",
22+
"display": "standalone",
23+
"theme_color": "#000000",
24+
"background_color": "#ffffff"
25+
}

‎webcam_demo/public/robots.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# https://www.robotstxt.org/robotstxt.html
2+
User-agent: *
3+
Disallow:

‎webcam_demo/src/App.css

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
/* Root container: center all demo content horizontally. */
.App {
  text-align: center;
}

/* CRA default logo sizing — appears unused by App.js in this commit. */
.App-logo {
  height: 40vmin;
  pointer-events: none;
}

/* Only spin the logo when the user has not requested reduced motion. */
@media (prefers-reduced-motion: no-preference) {
  .App-logo {
    animation: App-logo-spin infinite 20s linear;
  }
}

/* Full-viewport dark header; flex column centers the video/canvas stack
   and the Start button rendered by App.js. */
.App-header {
  background-color: #282c34;
  min-height: 100vh;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  font-size: calc(10px + 2vmin);
  color: white;
}

/* CRA default link color — appears unused by App.js in this commit. */
.App-link {
  color: #61dafb;
}

/* Continuous rotation referenced by .App-logo above. */
@keyframes App-logo-spin {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}

‎webcam_demo/src/App.js

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
import { useRef } from 'react'
2+
import { CompreFace } from 'compreface-javascript-sdk';
3+
import './App.css';
4+
5+
function App() {
6+
const videoTag = useRef(null);
7+
const canvas1 = useRef(null);
8+
const canvas2 = useRef(null);
9+
const canvas3 = useRef(null);
10+
11+
const drawFace = (canvasElement, faceData, extraCanvas) => {
12+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
13+
document.dispatchEvent(evt);
14+
let box = faceData.result[0].box;
15+
16+
canvasElement.clearRect(0, 0, 640, 480);
17+
extraCanvas.clearRect(0, 0, 640, 480);
18+
19+
canvasElement.strokeStyle = 'green';
20+
extraCanvas.strokeStyle = "blue";
21+
extraCanvas.fillStyle = "white"
22+
23+
extraCanvas.lineWidth = 5;
24+
canvasElement.lineWidth = 5;
25+
26+
canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
27+
extraCanvas.fillText( Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' ' + faceData.result[0].age[0] + '-' + faceData.result[0].age[1], box.x_min, box.y_min - 10)
28+
29+
}
30+
31+
const handleVideoStart = () => {
32+
navigator.mediaDevices.getUserMedia({ video: true})
33+
.then(stream => videoTag.current.srcObject = stream)
34+
.catch( error => console.error(error) )
35+
36+
videoTag.current.addEventListener('play', () => {
37+
// CompreFace init
38+
let server = "http://localhost";
39+
let port = 8000;
40+
let detection_key = "your_api_key";
41+
42+
let core = new CompreFace(server, port);
43+
let detection_service = core.initFaceDetectionService(detection_key);
44+
// end of CompreFace init
45+
46+
let ctx1 = canvas1.current.getContext('2d');
47+
let ctx2 = canvas2.current.getContext('2d');
48+
let ctx3 = canvas3.current.getContext("2d");
49+
50+
document.addEventListener('next_frame', () => {
51+
ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
52+
canvas1.current.toBlob( blob => {
53+
detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
54+
.then(res => {
55+
drawFace(ctx2, res, ctx3)
56+
})
57+
.catch(error => console.log(error))
58+
}, 'image/jpeg', 0.95)
59+
})
60+
61+
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
62+
document.dispatchEvent(evt);
63+
})
64+
}
65+
66+
return (
67+
<div className="App">
68+
<header className="App-header">
69+
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
70+
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
71+
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
72+
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
73+
74+
<div>
75+
<button onClick={handleVideoStart} >Start video</button>
76+
</div>
77+
78+
</header>
79+
</div>
80+
);
81+
}
82+
83+
export default App;

‎webcam_demo/src/App.test.js

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
import { render, screen } from '@testing-library/react';
2+
import App from './App';
3+
4+
test('renders learn react link', () => {
5+
render(<App />);
6+
const linkElement = screen.getByText(/learn react/i);
7+
expect(linkElement).toBeInTheDocument();
8+
});

‎webcam_demo/src/index.css

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
/* Global reset + system font stack for all UI text. */
body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Monospace stack for inline code snippets. */
code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}

‎webcam_demo/src/index.js

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';

// Mount the webcam-demo <App /> into the #root element of public/index.html.
// StrictMode enables extra development-only checks and warnings; it renders
// nothing itself.
ReactDOM.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
  document.getElementById('root')
);

// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();

‎webcam_demo/src/logo.svg

Lines changed: 1 addition & 0 deletions
Loading

‎webcam_demo/src/reportWebVitals.js

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
/**
 * Lazily load the web-vitals library and forward each Core Web Vitals
 * metric (CLS, FID, FCP, LCP, TTFB) to the supplied callback.
 * A missing or non-callable argument is silently ignored.
 *
 * @param {Function} [onPerfEntry] - receives each metric object as it is measured
 */
const reportWebVitals = (onPerfEntry) => {
  const isCallable = Boolean(onPerfEntry) && onPerfEntry instanceof Function;
  if (!isCallable) {
    return;
  }
  // Dynamic import keeps web-vitals out of the main bundle until requested.
  import('web-vitals').then((vitals) => {
    const { getCLS, getFID, getFCP, getLCP, getTTFB } = vitals;
    getCLS(onPerfEntry);
    getFID(onPerfEntry);
    getFCP(onPerfEntry);
    getLCP(onPerfEntry);
    getTTFB(onPerfEntry);
  });
};

export default reportWebVitals;

‎webcam_demo/src/setupTests.js

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
// jest-dom adds custom jest matchers for asserting on DOM nodes.
2+
// allows you to do things like:
3+
// expect(element).toHaveTextContent(/react/i)
4+
// learn more: https://github.com/testing-library/jest-dom
5+
import '@testing-library/jest-dom';

‎webcam_demo/yarn.lock

Lines changed: 11406 additions & 0 deletions
Large diffs are not rendered by default.

0 commit comments

Comments
 (0)
Please sign in to comment.