% citations.bib — BibTeX reference database.
% (GitHub web-UI chrome and the scraped line-number gutter that preceded the
% entries have been removed; BibTeX ignores text outside @...{...} anyway,
% but it was pure noise.)
@article{ibm,
  author        = {Merler, Michele and
                   Ratha, Nalini K. and
                   Feris, Rog{\'{e}}rio Schmidt and
                   Smith, John R.},
  title         = {Diversity in Faces},
  journal       = {CoRR},
  volume        = {abs/1901.10436},
  year          = {2019},
  url           = {http://arxiv.org/abs/1901.10436},
  archivePrefix = {arXiv},
  eprint        = {1901.10436},
  timestamp     = {Sun, 03 Feb 2019 14:23:05 +0100},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1901-10436},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}
@misc{fer-data,
  author        = {Barsoum, Emad and
                   Zhang, Cha and
                   Canton Ferrer, Cristian and
                   Zhang, Zhengyou},
  title         = {Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution},
  year          = {2016},
  eprint        = {1608.01041},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV}
}
@article{google-data,
  author        = {Vemulapalli, Raviteja and
                   Agarwala, Aseem},
  title         = {A Compact Embedding for Facial Expression Similarity},
  journal       = {CoRR},
  volume        = {abs/1811.11283},
  year          = {2018},
  url           = {http://arxiv.org/abs/1811.11283},
  archivePrefix = {arXiv},
  eprint        = {1811.11283},
  timestamp     = {Fri, 30 Nov 2018 12:44:28 +0100},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1811-11283},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}
@misc{cifar10,
  author   = {Krizhevsky, Alex and Nair, Vinod and Hinton, Geoffrey},
  title    = {{CIFAR-10} ({Canadian Institute for Advanced Research})},
  year     = {2009},
  url      = {http://www.cs.toronto.edu/~kriz/cifar.html},
  abstract = {The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. },
  keywords = {Dataset},
  internal-note = {Was @article with empty journal/year (required-field warnings); converted to @misc for a web-published dataset. Year 2009 is the CIFAR tech-report year (Krizhevsky, "Learning Multiple Layers of Features from Tiny Images") -- confirm against the source page. Empty terms field dropped.}
}
@inproceedings{ck+,
  author    = {Lucey, Patrick and Cohn, Jeffrey and Kanade, Takeo and Saragih, Jason and Ambadar, Zara and Matthews, Iain},
  title     = {The Extended {Cohn-Kanade} Dataset ({CK+}): A complete dataset for action unit and emotion-specified expression},
  booktitle = {2010 {IEEE} Computer Society Conference on Computer Vision and Pattern Recognition - Workshops, {CVPRW} 2010},
  year      = {2010},
  month     = jul,
  pages     = {94--101},
  doi       = {10.1109/CVPRW.2010.5543262},
  internal-note = {Fixed: proceedings title was in `journal` (ignored by @inproceedings; `booktitle` is required), pages used a single hyphen, month was a quoted numeral instead of the jul macro. Key `ck+` kept unchanged so existing \cite{ck+} calls still resolve.}
}
@misc{ppgn,
  author        = {Nguyen, Anh and Clune, Jeff and Bengio, Yoshua and Dosovitskiy, Alexey and Yosinski, Jason},
  title         = {Plug \& Play Generative Networks: Conditional Iterative Generation of Images in Latent Space},
  year          = {2016},
  eprint        = {1612.00005},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  internal-note = {Fixed: bare `&` in the title is a LaTeX special character and breaks the compiled bibliography; escaped as \&.}
}
@misc{cdcgan,
  author        = {Mirza, Mehdi and Osindero, Simon},
  title         = {Conditional Generative Adversarial Nets},
  year          = {2014},
  eprint        = {1411.1784},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG}
}
@comment{Removed: this was a byte-for-byte duplicate of the `ppgn` entry defined
earlier in this file, under the same citation key. Classic BibTeX aborts with a
"repeated entry" error on duplicate keys, so the second copy is disabled here
rather than kept. Cite the first `ppgn` entry.}
@misc{presgan,
  author        = {Dieng, Adji B. and Ruiz, Francisco J. R. and Blei, David M. and Titsias, Michalis K.},
  title         = {Prescribed Generative Adversarial Networks},
  year          = {2019},
  eprint        = {1910.04302},
  archivePrefix = {arXiv},
  primaryClass  = {stat.ML}
}