<!DOCTYPE html>
<html lang="en"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Mathias Gallardo</title>
<link href="./files/bootstrap.min.css" rel="stylesheet" media="screen">
<link href="./files/style.css" rel="stylesheet">
<link href="./files/css" rel="stylesheet" type="text/css">
<!-- Tracking code -->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-3698471-13']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body onload="start()">
<div id="header">
<div class="placeholder" data-large="./icons/bg03.jpg" >
<img src="./icons/bg02.jpg" class="img-small">
<div style="padding-bottom: 35.5%;"></div>
</div>
<div id="headerblob">
<img src="./icons/me.jpg" class="img-circle imgme">
<div id="headertext">
<div id="htname">Mathias Gallardo</div>
<div id="htdesc">Engineer/PhD in Computer Vision/Machine Learning and Medical Imaging</div>
<div id="htem">Mathias.Gallardo _at_ gmail.com</div>
<div id="icons">
<div class="svgico">
<a href="https://www.linkedin.com/in/mathias-gallardo/"><img src="./icons/11-linkedin.svg" width="44px"></a>
</div>
<div class="svgico">
<a href="http://www.viadeo.com/p/002mzxjtqi495wf"><img src="./icons/viadeo.png" width="44px"></a>
</div>
<!-- <div class="svgico">
<a href="https://github.com/MathiasGallardo"><img src="./icons/octocat.svg" width="48px"></a>
</div> -->
<div class="svgico">
<a href="https://www.researchgate.net/profile/Mathias_Gallardo"><img src="./icons/researchgate.png" width="44px"></a>
</div>
<div class="svgico">
<a href="https://drive.google.com/file/d/1GxiLMRkd4MN8JgPzTB5lsxzS7M2Fi_SU/view?usp=share_link"><img src="./icons/CV.svg" width="150px"></a>
</div>
</div>
</div>
</div>
</div>
<div class="container" style="font-size:18px; font-weight:300;margin-top:0px;margin-bottom:50px;">
<b>Bio</b>. With a background in computer vision and ML/DL and post-doctoral experience in AI for ophthalmology, I aim to support healthcare specialists in their practice and to have a significant impact through data analysis and algorithms. Through my experience as a PhD student in the <a href="http://igt.ip.uca.fr/encov/">EnCoV</a> (Endoscopy and Computer Vision) research group and as a postdoctoral researcher in the <a href="https://www.artorg.unibe.ch/research/aimi/index_eng.html">AIMI</a> (Artificial Intelligence in Medical Imaging) research group, I have learned from experts in their fields and gathered a broad range of knowledge and skills, which allows me to contribute to theoretical problems in computer vision and to support clinicians in their practice with learning-based algorithms.
<br><br>
<b>Current situation</b>. Since April 2023, I have been working as a Data Scientist in the Data Science and Innovation Team at the <a href="https://www.fo-rothschild.fr/professionnel">Hôpital Fondation Adolphe de Rothschild</a> in Paris, France. One of my main projects is the <a href="https://evired.org/">EviRed</a> project, which aims to replace the current classification of diabetic retinopathy using AI.
<br><br>
See my <a href="https://drive.google.com/file/d/1GxiLMRkd4MN8JgPzTB5lsxzS7M2Fi_SU/view?usp=share_link">CV</a> for more details.
</div>
<hr class="soft">
<div class="container">
<!--<h2>Publications</h2>-->
<!-- <h1>Publications: Peer-Reviewed International Journals</h1> -->
<h1 style="text-align: center;margin-bottom: 2cm;">Publications: Peer-Reviewed International Journals</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2023_SciReport/2023_GamazoTejero_etal_SciRep_PredictBiomarkerFromWeakAnnotations.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Predicting OCT biological marker localization from weak annotations</div>
<div class="pubd">In this paper, we propose a method that automatically locates biological markers to the ETDRS rings, only requiring B-scan-level presence SRF and IRF annotations. The neural network outputs were mapped into the corresponding ETDRS rings. We incorporated the class annotations and domain knowledge into a loss function to constrain the output with biologically plausible solutions. The method outperforms previous baselines even in the most challenging scenarios and also shows consistent en-face segmentation despite not incorporating volume information in the training process.</div>
<div class="puba">Javier Gamazo Tejero, Pablo Marquez-Neila, Thomas Kurmann, <u>Mathias Gallardo</u>, Martin Sebastian Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">Scientific Report, published November 2023</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/1EUKf-U20SsvHZXEo7d0rdMqGH70lBM4A/view?usp=sharing">PDF</a></li>
<li><a href="https://www.nature.com/articles/s41598-023-47019-6">Editor</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2023_IJCARS/2023_Hayoz_etal_IJCARS_LearningPoseEndoscope.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Learning how to robustly estimate camera pose in endoscopic videos</div>
<div class="pubd">Surgical scene understanding plays a critical role in the technology stack of tomorrow’s intervention-assisting systems in endoscopic surgeries. For this, tracking the endoscope pose is a key component, but remains challenging due to illumination conditions, deforming tissues and the breathing motion of organs. We propose a solution for stereo endoscopes that estimates depth and optical flow to minimize two geometric losses for camera pose estimation. Most importantly, we introduce two learned adaptive per-pixel weight mappings that balance contributions according to the input image content. To do so, we train a Deep Declarative Network to take advantage of the expressiveness of deep learning and the robustness of a novel geometric-based optimization approach.</div>
<div class="puba">Michel Hayoz, Christopher Hahne, <u>Mathias Gallardo</u>, Daniel Candinas, Thomas Kurmann, Maximilian Allan, Raphael Sznitman</div>
<div class="pubv">International Journal of Computer Assisted Radiology and Surgery, published May 2023</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/1GYGTaBRd4RpEGivGH9Ely2s9pkItrmPX/view?usp=sharing">PDF</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2022_Ophthalmologica/2022_Habra_etal_Ophthalmologica_EvalAIFluid.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Evaluation of an Artificial Intelligence-based Detector of Sub- and Intra-Retinal Fluid on a large set of OCT volumes in AMD and DME</div>
<div class="pubd">In this retrospective cohort study, we wanted to evaluate the performance of an artificial intelligence (AI) algorithm in detecting retinal fluid in spectral-domain OCT volume scans from a large cohort of patients with neovascular age-related macular degeneration (AMD) and diabetic macular edema (DME).</div>
<div class="puba">Oussama Habra, <u>Mathias Gallardo</u>, Till Meyer zu Westram, Sandro De Zanet, Damian Jaggi, Martin S. Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">Ophthalmologica, published October 2022</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/1zrG5oKSS_nNBhvLl8-LJu8QkNrL7sRwh/view?usp=sharing">PDF</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2021_ORET/2021_Gallardo_etal_ORET_TreatmentDemandPrediction.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Machine learning can predict anti-VEGF treatment demand in a Treat-and-Extend regimen for patients with nAMD, DME and RVO associated ME</div>
<div class="pubd">In this work, we aimed to observe the practical feasibility of machine learning-based prediction technique for routinely collected retrospective clinical cohorts considering three different pathologies (nAMD, DME and RVO related ME). Precisely, we wish to observe the feasibility of predicting the long-term demand of anti-VEGF medication at the early stage of a one-year TER regimen in a routine clinical setting. For each pathology group, we trained two Random Forest classifiers for identifying low and high demanders and analyzed in detail their performance and the consistency of the most important features used with those leveraged by clinicians.</div>
<div class="puba"><u>Mathias Gallardo</u>, Marion R. Munk, Thomas Kurmann, Sandro De Zanet, Agata Mosinska, Isıl Kutlutürk Karagoz, Martin S. Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">Ophthalmology Retina, published July 2021</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/1u9_VLm9n34pycgSM52GrIRBRvEiO2riS/view?usp=sharing">PDF</a></li>
<li><a href="https://www.sciencedirect.com/science/article/pii/S2468653021001615">Editor</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2019_CurveSfT/2019_Gallardo_etal_IJCV_CurveSfT_image.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Shape-from-Template with Curves</div>
<div class="pubd">We propose a considerable extension of the work on “Shape-from-Template in Flatland” by [Gallardo et al., 2015] in four ways. The first way is to extend the solutions and the theoretical analysis to all sub-cases of Curve SfT; in [Gallardo et al., 2015] only the case of 2D curve reconstruction was studied. The second way is our discrete graphical method that can generate all candidate solutions; in [Gallardo et al., 2015], only a single solution could be generated. The third way is an improved method to detect critical points which has better stability than the method in [Gallardo et al., 2015]. The fourth way is a larger quantitative evaluation on real and simulated datasets.</div>
<div class="puba"><u>Mathias Gallardo</u>, Daniel Pizarro, Toby Collins, Adrien Bartoli</div>
<div class="pubv">International Journal Computer Vision, accepted August 2019</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=1XHs4gMFECJWtyQAA1UYkpgpUrzkWqzyG">PDF</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">Publications: Peer-Reviewed International Conferences</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2021_MICCAI/2021_Marafioti_etal_MICCAI_CataNet.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">CataNet: Predicting remaining cataract surgery duration</div>
<div class="pubd">Cataract surgery is a sight saving surgery that is performed over 10 million times each year around the world. With such a large demand, the ability to organize surgical wards and operating rooms efficiently is critical to delivery this therapy in routine clinical care. In this context, estimating the remaining surgical duration (RSD) during procedures is one way to help streamline patient throughput and workflows. To this end, we propose CataNet, a method for cataract surgeries that predicts in real time the RSD jointly with two influential elements: the surgeon’s experience, and the current phase of the surgery.</div>
<div class="puba">Andrés Marafioti, Michel Hayoz, <u>Mathias Gallardo</u>, Pablo Márquez Neila, Sebastian Wolf, Martin Zinkernagel, Raphael Sznitman</div>
<div class="pubv">MICCAI - International Conference on Medical Image Computing and Computer Assisted Intervention, 2021</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/15zDEmuI4bEZ1fUo8U1QAZmaj6kFMyJyg/view?usp=sharing">PDF</a></li>
<li><a href="https://link.springer.com/chapter/10.1007/978-3-030-87202-1_41">Editor</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2017_NRSfMS/2017_Gallardo_etal_ICCV_NRSfMS_image.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Dense Non-Rigid Structure-from-Motion and Shading with Unknown Albedos</div>
<div class="pubd">Non-Rigid Structure-from-Motion (NRSfM) is a 3D reconstruction method which only uses a sequence of images where a surface deforms and recovers the 3D shape of the surface visible in each image. Current limitation is that NRSfM methods do not handle poorly-textured surfaces that deform non-smoothly. We show that combining NRSfM with shading constraint allows to estimate dense surfaces under non-smooth deformations.</div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">ICCV - IEEE International Conference on Computer Vision, 2017</div>
<div class="pubv">Poster - Acceptance rate 28.9%</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkdW9hV1poTm90YkE">PDF</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkeDhKTnIxMnU4cHM">Supp</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkSlpsVkFCMl8tSlU">Poster</a></li>
<li><a href="https://youtu.be/Dacl70Sgg7Q">Video</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2016_SfTS/2016_Gallardo_etal_BMVC_SfTshading_image.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Using Shading and a 3D Template to Reconstruct Complex Surface Deformations</div>
<div class="pubd">We propose to push one of the limitations of current Shape-from-Template (SfT) methods: reconstructing complex deformations on poorly-textured surfaces. For this, we combine SfT with shading constraints in an integrated optimization framework. We also propose a cascaded initialization: it uses a batch of images to estimate the 3D surface shapes (visible in each image), the illumination, the camera responses and the surface albedos which all are required to use shading.</div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">BMVC - British Machine Vision Conference, 2016</div>
<div class="pubv">Poster - Acceptance rate 39%</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkWXFiWlktMkxyZE0">PDF</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkSmV0aWk1X1dIeEU">Poster</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2016_CSfT/2016_Gallardo_etal_RFIA_SfTcreases_image.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Can we Jointly Register and Reconstruct Creased Surfaces by Shape-from-Template Accurately?</div>
<div class="pubd">We investigate how to solve the Shape-from-Template problem for creased surfaces. We propose two new components and add them in a non-convex refinement: a crease-preserving smoothing term based on M-estimator and a robust boundary term to improve the surface registration. </div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">ECCV - European Conference on Computer Vision, 2016</div>
<div class="pubv">Poster - Acceptance rate 26.6%</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkQnBZYndHdVV0cU0">PDF</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkYy1KSkhWWTJpWGc">Supp</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkcFhSNmdmcGs1c2M">Poster</a></li>
<li><a href="https://youtu.be/PMhx7nJeVRo">Video</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1Mkd2pldDJzYUxhdjA">Slides</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2015_1DSfT/2015_Gallardo_etal_CVPR_1DSfT_image.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Shape-from-Template in Flatland</div>
<div class="pubd">Shape-from-Template (SfT) is the problem of inferring the shape of a deformable object as observed in an image using a shape template. We address the special case of SfT, called 1DSfT, where the template is a 1D line and the input image is 2D. 1DSfT appears to be not so easy compared to the usual SfT since multiple solutions exist. We propose here a theoretical study and two computational solutions with simulated and real datasets.</div>
<div class="puba"><u>Mathias Gallardo</u>, Daniel Pizarro, Toby Collins, Adrien Bartoli</div>
<div class="pubv">CVPR - IEEE International Conference on Computer Vision and Pattern Recognition, 2015</div>
<div class="pubv">Poster - Acceptance rate 28.4%</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkcDBzMjNVTExPeVk">PDF</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkOHZodU01cWVZQk0">Poster</a></li>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkZ2ZuRHMtYjc0WTQ">Slides</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">Abstracts</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2023_ARVO/2023_ARVO_CNV.jpeg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Biomarker assessment for CNV development prediction in multifocal choroiditis (MFC) and punctate inner choroidopathy (PIC): A large, longitudinal, multicenter study on patients with MFC and PIC using an artificial intelligence-based OCT fluid and biomarker detector</div>
<div class="pubd">Secondary choroidal neovascularization (CNV) represents the major cause of vision loss in idiopathic MFC and PIC. This study assessed potential biomarkers on OCT to predict the development of CNV using an artificial intelligence (AI)- based software.</div>
<div class="puba">Lorenzo Ferro Desideri, <u>Mathias Gallardo</u>, Muriel Ott, Ariel Schlaen, Debra Goldstein, H Nida Sen, Maurizio Battaglia Parodi, Vita S Dingerkus, Yael Sharon, Michal Kramer, Siqing Yu, Sandro De Zanet, Marion Ronit Munk</div>
<div class="pubv">Investigative Ophthalmology & Visual Science, June 2023</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2789902">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2023_ARVO/2023_ARVO_atrophy_biomarker_detector.jpeg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Ensemble and Majority-Vote Strategies for Deep-Learning Based Detection of Atrophy-Related Biomarkers in OCT Volumes </div>
<div class="pubd">To build a detection model for atrophy-related biomarkers in OCT Bscans of AMD patients and explore multiple training strategies using multi-grader annotations.</div>
<div class="puba">Davide Scandella, <u>Mathias Gallardo</u>, Raphael Sznitman, Martin Sebastian Zinkernagel, Sebastian Wolf</div>
<div class="pubv">Investigative Ophthalmology & Visual Science, June 2023</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2789792">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2023_ARVO/2023_ARVO_weak_annotations.jpeg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Deep-learning model to localize biological markers on OCT volumes from weak annotations</div>
<div class="pubd">Recent developments in deep learning have shown success in accurately predicting the location of biological markers in OCT volumes of patients with Age-Related Macular Degeneration (AMD) and Diabetic Retinopathy (DR). This approach has the potential to improve clinical practices and advance medical research. However, producing fine annotations to train these algorithms is burdensome for experts. We propose a method that automatically identifies and assigns biological markers to the ETDRS rings, only requiring B-scan-level presence annotations.</div>
<div class="puba">Javier Gamazo Tejero, Pablo Marquez-Neila, Thomas Kurmann, <u>Mathias Gallardo</u>, Martin Sebastian Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">Investigative Ophthalmology & Visual Science, June 2023</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2789793">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2022_EURETINA/2022_EURETINA_EvalAIFluid.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Evaluation of an Artificial Intelligence-based Detector of Sub- and Intra-Retinal Fluid on a large set of OCT volumes in AMD and DME Patients</div>
<div class="pubd">To evaluate the performance of an artificial intelligence (AI) algorithm in detecting retinal fluid in spectral-domain OCT volume scans from a large cohort of patients with neovascular age-related macular degeneration (AMD) and diabetic macular edema (DME).</div>
<div class="puba">Oussama Habra, <u>Mathias Gallardo</u>, Till Meyer zu Westram, Damian Jaggi, Sandro De Zanet, Martin S. Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">EURETINA, August 2022</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://euretina.softr.app/abstract?recordId=reckFC0vIcyNi6SXx">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2022_ARVO/2022_ARVO_EvalAIRetina.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Evaluating an OCT-based Algorithm of Central Subfield Thickness Estimation on AMD and DME patients</div>
<div class="pubd"> To evaluate the accuracy of an algorithm to estimate Central Subfield Thickness from OCT volumes for patients with AMD or DME.</div>
<div class="puba"><u>Mathias Gallardo</u>, Oussama Habra, Till Meyer zu Westram, Sandro De Zanet, Sebastian Wolf, Raphael Sznitman, Martin S. Zinkernagel</div>
<div class="pubv">Investigative Ophthalmology & Visual Science June 2022, Vol.63, 2993</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2780625">Online abstract</a></li>
<li><a href="https://drive.google.com/file/d/1IzUUBHttwTnKFPagdYUFeuvqSL5efFpn/view?usp=sharing">Poster</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2021_IOVS/2021_IOVS_FoveaOpticDisk.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Automated fovea and optic disc detection in the presence of occlusions in Fundus SLO Images</div>
<div class="pubd">To develop and validate a machine learning algorithm for accurate estimation of the optic disc and fovea center position in infra-red SLO fundus images including cases outside of the field of view or apparent occlusions of the landmarks.</div>
<div class="puba">Marc Stadelmann, Agata Mosinska, <u>Mathias Gallardo</u>, Raphael Sznitman, Marion Munk, Stefanos Apostolopoulos, Carlos Ciller, Sandro De Zanet</div>
<div class="pubv">Investigative Ophthalmology & Visual Science June 2021, Vol.62, 109</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2774226">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2020_IOVS/2020_IOVS_ResponsePredictionTRE.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Machine learning to predict anti-VEGF treatment response in a Treat-and-Extend regimen (TRE)</div>
<div class="pubd">This abstract encompasses our findings on the capabilities of a machine learning approach to predict treatment response of patients with wAMD, DME and RVO treated according to a TER.</div>
<div class="puba"><u>Mathias Gallardo</u>, Marion Munk, Thomas Kurmann, Sandro De Zanet, Agata Mosinska, Mark van Grinsven, Clara I. Sanchez, Martin S. Zinkernagel, Sebastian Wolf, Raphael Sznitman</div>
<div class="pubv">Investigative Ophthalmology & Visual Science June 2020, Vol.61, 1629</div>
<div class="pubv">Abstract</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/14Enp9LECrIZpZytGsM4ABYzUaocuXHD0/view?usp=sharing">Abstract</a></li>
<li><a href="https://iovs.arvojournals.org/article.aspx?articleid=2767143&resultClick=1">Online abstract</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">Challenges</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2020_MICCAI/2020_MICCAI_CataractChallenge.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">A Deep-Learning Based Cataract Workflow Analysis</div>
<div class="pubd">In this work, we propose a deep-learning approach for surgical steps prediction in the cataract surgeries. Based on the previous work of (Y. Jin et al., 2019), we train an ensemble model to solve simultaneously the multi-label tool classification and the multi-task surgical step classification problems and use as regularizer a correlation loss to take advantage of the relationships between steps and tools. Each submodel combines a CNN for visual feature extraction and an RNN operating on the sequence of extracted features to perform the surgical step classification.</div>
<div class="puba">Michel Hayoz, <u>Mathias Gallardo</u>, Pablo Márquez Neila, Martin S. Zinkernagel, Raphael Sznitman</div>
<div class="pubv">CATARACTS Workflow, part of the EndoVis Challenge, MICCAI 2020</div>
<div class="pubv">Code & Report - Ranked 1st over 5 international participants</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/11WKGGQfBuhKofunjLcpYBzy313RBUn0T/view?usp=sharing">Report</a></li>
<li><a href="https://cataracts2020.grand-challenge.org/">Challenge Website</a></li>
<li><a href="https://www.artorg.unibe.ch/about_us/news/2020/artorg_wins_miccai_2020_cataract_surgery_challenge/index_eng.html">News</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">Book Chapter</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2020_AP3DR/2020_Gallardo_etal_AP3DR_nrsfms.jpg">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Non-Rigid Structure-from-Motion and Shading</div>
<div class="pubd">We show how photometric and motion-based approaches can be combined to reconstruct the 3D shape of deformable objects from monocular images. We start by motivating the problem using real-world applications. We give a comprehensive overview of the state-of-the-art approaches and discuss their limitations for practical use in these applications. We then introduce the problem of Non-Rigid Structure-from-Motion and Shading (NRSfMS), where photometric and geometric information are used for reconstruction, without prior knowledge about the shape of the deformable object. We present in detail the first technical solution to NRSfMS and close the chapter with the main remaining open problems.</div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">Advances in Photometric 3D-Reconstruction, J.-D. Durou, M. Falcone, Y. Quéau and S. Tozza (Eds.), Springer, 2020</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/file/d/1RX9wSp4OW-NVtdM5dBzWqHbtc0fw6RW-/view?usp=sharing">PDF</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">National Journals and Conference Proceedings</h1>
<div id="pubs">
<div class="pubwrap">
<div class="container" style="font-size:18px; font-weight:300;margin-top:-1cm;margin-bottom:0px;">
<div class="pubt">Utilisation de la photométrie et d'un patron pour la reconstruction de surfaces pliées et la calibration photométrique</div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">Traitement du signal, GRETSI-CNRS, special issue: selected papers from RFIA 2016, accepted May 2017</div>
</div>
</div>
<div class="pubwrap">
<div class="container" style="font-size:18px; font-weight:300;margin-top:-1cm;margin-bottom:0px;">
<div class="pubt">Recalage et Reconstruction 3D de Surfaces Pliées par Shape-from-Template</div>
<div class="puba"><u>Mathias Gallardo</u>, Toby Collins, Adrien Bartoli</div>
<div class="pubv">RFIA - Congrès Francophone de Reconnaissance des Formes et Intelligence Artificielle, Clermont-Ferrand, 2016 (Oral)</div>
</div>
</div>
<div class="pubwrap">
<div class="container" style="font-size:18px; font-weight:300;margin-top:-1cm;margin-bottom:0px;">
<div class="pubt">Shape-from-Template dans Flatland</div>
<div class="puba"><u>Mathias Gallardo</u>, Daniel Pizarro, Adrien Bartoli, Toby Collins</div>
<div class="pubv">ORASIS - Congrès Francophone des Jeunes Chercheurs en Vision par Ordinateur, Amiens, 2015 (Oral)</div>
</div>
</div>
<hr class="soft">
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">PhD Dissertation</h1>
<div id="pubs">
<div class="pubwrap">
<div class="row">
<div class="col-md-6">
<div class="pubimg">
<img src="./works/2018_defense/2018_Gallardo_defense_image.png">
</div>
</div>
<div class="col-md-6">
<div class="pub">
<div class="pubt">Contributions to </div>
<div class="pubt">Monocular Deformable 3D Reconstruction:</div>
<div class="pubt">Curvilinear Objects and Multiple Visual Cues</div>
<div class="puba"><u>Mathias Gallardo</u></div>
<div class="pubv">Université Clermont Auvergne, September 2018</div>
<div class="pubv">Supervisors: Toby Collins and Adrien Bartoli</div>
<div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=1ZM6O2Vi9ar1Q7Yj4MWkxRO-7tIIUF4LJ">Thesis (PDF - HD)</a></li>
<li><a href="https://drive.google.com/open?id=1F7qcBDurIOc-TSVBjpHcGlhc8XYD54E3">Thesis (PDF - SD)</a></li>
<li><a href="https://drive.google.com/open?id=1a9_7JCxhpPnGssVVyCCSZF6EMbuGWbQZ">Slides (PDF)</a></li>
<li><a href="https://drive.google.com/open?id=114vAo9k8g26AAEffUp73mN9pEaDdyXdp">Slides (PPTX)</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<hr class="soft">
<div class="container">
<h1 style="text-align: center;margin-bottom: 2cm;">Work Experiences</h1>
<div class="row">
<div class="col-md-1">
<div class="pp">
<a href="">
<img src="" class="img-circle imgb">
</a>
<div class="ppt"></div>
<div class="ppd">
</div>
</div>
</div>
<div class="col-md-5">
<div class="pp">
<a href="https://www.idemia.com/">
<img src="./icons/morpho.jpg" class="img-circle imgb">
</a>
<div class="ppt">Image restoration for biometrics - M2 internship</div>
<div class="ppd">
In 2013, I completed a 6-month internship at <a href="https://www.idemia.com/">Morpho</a> (Osny, France), now Idemia, a world leader in digital security and identification technologies.
My project was to propose a method for estimating and correcting the inherent blur (Point Spread Function) of fingerprint modules.
</div>
<!-- <div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkMlVjU0ZQdTBEcTQ">PDF</a></li>
</ul>
</div> -->
</div>
</div>
<div class="col-md-5">
<div class="pp">
<a href="https://www.soitec.com/en">
<img src="./icons/soitec.jpg" class="img-circle imgb">
</a>
<div class="ppt">Defects Detection on Silicon Wafers - M1 internship</div>
<div class="ppd">
In 2012, I worked for 3 months on an industrial project at <a href="https://www.soitec.com/en">Soitec</a> (Bernin, France), a world leader in semiconductor materials.
The objective was to propose and implement an algorithm for the automatic detection of manufacturing defects on silicon wafers using IR images.
</div>
<!-- <div class="publ">
<ul>
<li><a href="https://drive.google.com/open?id=0B8X5X_qfO1MkZ1lsUnNSb0JVZVk">PDF</a></li>
</ul>
</div> -->
</div>
</div>
<div class="col-md-1">
<div class="pp">
<a href="">
<img src="" class="img-circle imgb">
</a>
<div class="ppt"></div>
<div class="ppd">
</div>
</div>
</div>
</div>
<div class="footer">
© Mathias Gallardo, 2024
</div>
<!-- place js at end for faster loading -->
<script src="./files/jquery-1.11.1.min.js"></script>
<script src="./files/bootstrap.min.js"></script>
<script src="./files/load_image.js"></script>
<script>
var more_projects_shown = false;
function start() {
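// Show or hide the extra projects section (#moreprojects) on click and update the #showmoreprojects link text.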
$("#showmoreprojects").click(function() {
if(!more_projects_shown) {
$("#moreprojects").slideDown('fast', function() {
$("#showmoreprojects").text('hide');
});
more_projects_shown = true;
} else {
$("#moreprojects").slideUp('fast', function() {
$("#showmoreprojects").text('show more');
});
more_projects_shown = false;
}
});
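// Show or hide the extra publications section (#morepubs) on click and update the #showmorepubs link text.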
var more_pubs_shown = false;
$("#showmorepubs").click(function() {
if(!more_pubs_shown) {
$("#morepubs").slideDown('fast', function() {
$("#showmorepubs").text('hide');
});
more_pubs_shown = true;
} else {
$("#morepubs").slideUp('fast', function() {
$("#showmorepubs").text('show more');
});
more_pubs_shown = false;
}
});
}
</script>
<div id="point-jawn" style="z-index: 2147483647;"></div></body></html>