Commit
Add working files up to partially working face recognition
vanessavun committed Jul 28, 2022
1 parent d3b188e commit d70384e
Showing 7 changed files with 239 additions and 9 deletions.
92 changes: 92 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -6,6 +6,7 @@
"@testing-library/jest-dom": "^5.16.4",
"@testing-library/react": "^13.3.0",
"@testing-library/user-event": "^13.5.0",
"clarifai": "^2.9.1",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-parallax-tilt": "^1.7.46",
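The clarifai package added above is not actually used yet in this commit: its import in src/App.js is commented out and the Clarifai REST endpoint is called directly with fetch instead. As a rough sketch only (assuming the 2.x client API and a placeholder API key), going through the client would look something like this:

import Clarifai from 'clarifai';

// Placeholder credential for illustration; a real key comes from the Clarifai portal.
const app = new Clarifai.App({ apiKey: 'YOUR_CLARIFAI_API_KEY' });

// Predict with the built-in face detection model constant and an image URL.
app.models
  .predict(Clarifai.FACE_DETECT_MODEL, 'https://example.com/some-photo.jpg')
  .then(response => {
    // Fractional bounding box of the first detected face.
    console.log(response.outputs[0].data.regions[0].region_info.bounding_box);
  })
  .catch(err => console.log('error', err));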
59 changes: 58 additions & 1 deletion public/index.html
@@ -2,10 +2,67 @@
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=EB+Garamond&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="../src/index.css">
    <title>Magic Brain React App</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!-- <script>
    //////////////////////////////////////////////////////////////////////////////
    // In this section, we set the user authentication, app ID, model details, and the URL
    // of the image we want as an input. Change these strings to run your own example.
    //////////////////////////////////////////////////////////////////////////////
    const USER_ID = 'nessabyte';
    // Your PAT (Personal Access Token) can be found in the portal under Authentication
    const PAT = '63d9310bff304a7486be5471c990b3fc';
    const APP_ID = 'my-first-application';
    // Change these to whatever model and image URL you want to use
    const MODEL_ID = 'face-detection';
    const MODEL_VERSION_ID = '45fb9a671625463fa646c3523a3087d5';
    const IMAGE_URL = 'https://media.istockphoto.com/photos/multi-ethnic-guys-and-girls-taking-selfie-outdoors-with-backlight-picture-id1368965646?b=1&k=20&m=1368965646&s=170667a&w=0&h=9DO-7OKgwO8q7pzwNIb3aq2urlw3DNTmpKQyvvNDWgY=';
    //////////////////////////////////////////////////////////////////////////////
    // YOU DO NOT NEED TO CHANGE ANYTHING BELOW THIS LINE TO RUN THIS EXAMPLE
    //////////////////////////////////////////////////////////////////////////////
    const raw = JSON.stringify({
      "user_app_id": {
        "user_id": USER_ID,
        "app_id": APP_ID
      },
      "inputs": [
        {
          "data": {
            "image": {
              "url": IMAGE_URL
            }
          }
        }
      ]
    });
    const requestOptions = {
      method: 'POST',
      headers: {
        'Accept': 'application/json',
        'Authorization': 'Key ' + PAT
      },
      body: raw
    };
    // NOTE: MODEL_VERSION_ID is optional, you can also call prediction with the MODEL_ID only
    // https://api.clarifai.com/v2/models/{YOUR_MODEL_ID}/outputs
    // this will default to the latest version_id
    fetch("https://api.clarifai.com/v2/models/" + MODEL_ID + "/versions/" + MODEL_VERSION_ID + "/outputs", requestOptions)
      .then(response => response.text())
      .then(result => console.log(result))
      .catch(error => console.log('error', error));
    </script> -->
  </body>
</html>
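The commented-out script above is the raw REST version of the face-detection request. For reference, the part of the response that the app later reads (in calculateFaceLocation in src/App.js) has roughly this shape; the values here are illustrative, and the coordinates are fractions of the image dimensions rather than pixels:

const exampleResponse = {
  outputs: [
    {
      data: {
        regions: [
          {
            region_info: {
              bounding_box: {
                top_row: 0.21,
                left_col: 0.33,
                bottom_row: 0.78,
                right_col: 0.65
              }
            }
          }
        ]
      }
    }
  ]
};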
72 changes: 68 additions & 4 deletions src/App.js
@@ -1,20 +1,81 @@
import React from 'react';
import Navigation from './components/Navigation/Navigation';
import Logo from './components/Logo/Logo';
import ImageLinkForm from './components/ImageLinkForm/ImageLinkForm';
import Rank from './components/Rank/Rank';
import FaceRecognition from './components/FaceRecognition/FaceRecognition';
//import Clarifai from 'clarifai';
import './App.css';

function App() {
  // const app = new Clarifai.App({
  // apiKey: 'ad8f48f499b847ad9403887a80cda19e'
  // });

  const [input, setInput] = React.useState('')
  const [imageURL, setImageURL] = React.useState('')
  const [box, setBox] = React.useState({})

  function calculateFaceLocation(data) {
    const parsed = JSON.parse(data)
    const clarifaiFace = parsed.outputs[0].data.regions[0].region_info.bounding_box
    const image = document.getElementById('inputimage')
    const width = Number(image.width)
    const height = Number(image.height)
    return {
      leftCol: clarifaiFace.left_col * width,
      topRow: clarifaiFace.top_row * height,
      rightCol: width - (clarifaiFace.right_col * width),
      bottomRow: height - (clarifaiFace.bottom_row * height)
    };
  }

  function displayFaceBox(box) {
    console.log(box)
    setBox(box)
  }

  function onInputChange(event) {
    console.log(event.target.value)
    setInput(event.target.value)
  }

  function onButtonSubmit() {
    console.log('clicked')
    setImageURL(input)
    const USER_ID = 'nessabyte';
    const PAT = '63d9310bff304a7486be5471c990b3fc';
    const APP_ID = 'my-first-application';
    const MODEL_ID = 'face-detection';
    const MODEL_VERSION_ID = '45fb9a671625463fa646c3523a3087d5';
    const IMAGE_URL = input;
    const raw = JSON.stringify({
      "user_app_id": {
        "user_id": USER_ID,
        "app_id": APP_ID
      },
      "inputs": [
        {
          "data": {
            "image": {
              "url": IMAGE_URL
            }
          }
        }
      ]
    });

    const requestOptions = {
      method: 'POST',
      headers: {
        'Accept': 'application/json',
        'Authorization': 'Key ' + PAT
      },
      body: raw
    };

    fetch("https://api.clarifai.com/v2/models/" + MODEL_ID + "/versions/" + MODEL_VERSION_ID + "/outputs", requestOptions)
      .then(response => response.text())
      .then(result => displayFaceBox(calculateFaceLocation(result)))
      .catch(error => console.log('error', error));
  }

  return (
@@ -26,7 +87,10 @@ function App() {
        onInputChange={onInputChange}
        onButtonSubmit={onButtonSubmit}
      />
      <FaceRecognition
        imageURL={imageURL}
        box={box}
      />
    </div>

  );
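To make the conversion in calculateFaceLocation concrete, here is a worked example with assumed numbers: a rendered image of 500 x 300 px and the illustrative bounding box fractions used above.

// Assumed: image.width = 500, image.height = 300,
// bounding_box = { top_row: 0.21, left_col: 0.33, bottom_row: 0.78, right_col: 0.65 }
// leftCol   = 0.33 * 500          = 165  px from the left edge
// topRow    = 0.21 * 300          = 63   px from the top edge
// rightCol  = 500 - (0.65 * 500)  = 175  px from the right edge
// bottomRow = 300 - (0.78 * 300)  = 66   px from the bottom edge
// These four values become the inline top/right/bottom/left style of the
// absolutely positioned .bounding-box div rendered by FaceRecognition.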
8 changes: 8 additions & 0 deletions src/components/FaceRecognition/FaceRecognition.css
@@ -0,0 +1,8 @@
.bounding-box {
  position: absolute;
  box-shadow: 0 0 0 3px #149df2 inset;
  display: flex;
  flex-wrap: wrap;
  justify-content: center;
  cursor: pointer;
}
11 changes: 9 additions & 2 deletions src/components/FaceRecognition/FaceRecognition.js
@@ -1,7 +1,14 @@
import React from 'react';
import './FaceRecognition.css';

export default function FaceRecognition({ imageURL, box }) {
  return (
    <div className='center ma'>
      <div className='absolute mt2'>
        <img id='inputimage' alt='' src={imageURL} width='500px' height='auto' />
        <div className='bounding-box' style={{top: box.topRow, right: box.rightCol, bottom: box.bottomRow, left: box.leftCol}}></div>
      </div>
      {console.log(box)}
    </div>
  )
}
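For reference, a hypothetical call site with already-converted pixel offsets (the shape calculateFaceLocation returns) would look like:

<FaceRecognition
  imageURL='https://example.com/some-photo.jpg'
  box={{ topRow: 63, rightCol: 175, bottomRow: 66, leftCol: 165 }}
/>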
5 changes: 3 additions & 2 deletions src/index.css
@@ -1,9 +1,10 @@
body {
  margin: 0;
  font-family: 'EB Garamond', 'Courier New', Courier, monospace;
  background: linear-gradient(90deg, #3F2B96 0%, #A8C0FF 100%);
}

button {
  cursor: pointer;
}
