@article{2017arXiv171202494L,
  author        = {Lu, Jiajun and Sibai, Hussein and Fabry, Evan},
  title         = {Adversarial Examples that Fool Detectors},
  journal       = {arXiv e-prints},
  year          = {2017},
  month         = dec,
  eprint        = {1712.02494},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  keywords      = {Computer Science - Artificial Intelligence, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Graphics, Computer Science - Machine Learning},
}
Attacking classifiers is different from attacking detectors.
The authors propose a method to generate digital and physical adversarial examples that are robust to changes in viewing conditions.
- Stop signs
- Faces
I should get myself familiar with detection and tracking algorithms ASAP.