-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.html
747 lines (651 loc) · 39 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Character encoding must be the first thing in <head> (within the first 1024 bytes). -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Jhair Gallardo</title>
<!-- Google tag (gtag.js) -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-2CVZS8EVKF"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-2CVZS8EVKF');
</script>
<!-- Primary Meta Tags. NOTE(review): the former <description> element was removed —
     it is not a valid HTML element and makes browsers close <head> early; the page
     description lives in <meta name="description"> below. -->
<meta name="title" content="Jhair Gallardo">
<meta name="description" content="My research interest is focused on lifelong machine learning, self-supervised learning, and computer vision.">
<meta name="keywords" content="gianmarco, jhair, gallardo, callalli, rit, rochester institute of technology, new york, computer vision, deep-learning, machine-learning, machine learning, deep learning, artificial intelligence, AI, image, RIT, Rochester, rochester, lifelong learning, continual learning, self-supervised learning, ssl">
<meta name="author" content="Jhair Gallardo">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
<link href="https://fonts.googleapis.com/css?family=Montserrat" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Lato" rel="stylesheet">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
<!-- Open Graph / Facebook. og:image must be an absolute URL for scrapers to resolve it. -->
<meta property="og:type" content="website">
<meta property="og:url" content="https://jhairgallardo.github.io/">
<meta property="og:title" content="Jhair Gallardo">
<meta property="og:description" content="My research interest is focused on lifelong machine learning, self-supervised learning, and computer vision.">
<meta property="og:image" content="https://jhairgallardo.github.io/images/jhair.jpg">
<!-- Twitter -->
<meta property="twitter:card" content="summary_large_image">
<meta property="twitter:url" content="https://jhairgallardo.github.io/">
<meta property="twitter:title" content="Jhair Gallardo">
<meta property="twitter:description" content="My research interest is focused on lifelong machine learning, self-supervised learning, and computer vision.">
<meta property="twitter:image" content="https://jhairgallardo.github.io/images/jhair.jpg">
<style>
.newflag {
background-color: #DDD0C8;
color: #323232;
padding: 2px 5px;
border-radius: 5px;
border: 2px solid #DDD0C8;
font: 600 16px Helvetica, sans-serif;
font-family: Helvetica, sans-serif;
}
</style>
<style>
body {
font: 400 15px Helvetica, sans-serif;
font-family: Helvetica, sans-serif;
line-height: 1.8;
color: #323232;
}
img {
width: 100%;
height: auto;
}
h2 {
font-size: 24px;
text-transform: uppercase;
font-weight: 600;
margin-bottom: 20px;
text-align: center;
font-family: Helvetica, sans-serif;
letter-spacing: 4px;
}
h3 {
font-size: 22px;
font-weight: 400;
margin-bottom: 20px;
text-align: left;
}
h4 {
font-size: 19px;
line-height: 1.375em;
font-weight: 400;
margin-bottom: 20px;
text-align: left;
}
h5 {
font-size: 16px;
line-height: 1.375em;
font-weight: 400;
margin-bottom: 15px;
}
h6 {
font-size: 15px;
line-height: 1.375em;
font-weight: 400;
margin-bottom: 14px;
text-align: right;
}
.jumbotron {
/* background-image: url("images/bulbs.jpg"); */
background-size: 142% auto;
/* "background-align" is not a CSS property; "background-position" is what
   centers the (currently commented-out) background image. */
background-position: center;
padding: 100px 25px;
font-family: Montserrat, sans-serif;
background-color: #DDD0C8;
}
.container-fluid {
padding: 60px 50px;
}
.section-navbar {
color: #DDD0C8;
background-color: #323232;
}
.section-default {
color: #323232;
background-color: #FFFFFF;
}
.section-about-me,
.section-about-me h4,
.section-about-me a {
color: #323232;
background-color: #FFFFFF;
text-align: justify;
font-size: 20px;
line-height: 1.8em;
}
.section-recent-news {
color: #323232;
background-color: #FFFFFF;
}
.section-research {
color: #323232;
background-color: #FFFFFF;
text-align: justify;
}
.section-research h3 {
text-align: center;
}
.section-research h4 {
text-align: center;
}
.section-publications,
.section-publications a {
color: #323232;
background-color: #DDD0C8;
text-align: justify;
}
.logo {
color: #f4511e;
font-size: 200px;
}
.thumbnail {
padding: 0 0 15px 0;
border: none;
border-radius: 0;
}
.thumbnail2 {
width: 60px;
height: auto;
margin-bottom: 10px;
}
.carousel-control.right,
.carousel-control.left {
background-image: none;
color: #f4511e;
}
.carousel-indicators li {
border-color: #f4511e;
}
.carousel-indicators li.active {
background-color: #f4511e;
}
.item h4 {
font-size: 19px;
line-height: 1.375em;
font-weight: 400;
font-style: italic;
margin: 70px 0;
}
.item span {
font-style: normal;
}
.panel {
border: 1px solid #f4511e;
border-radius: 0 !important;
transition: box-shadow 0.5s;
}
.panel:hover {
box-shadow: 5px 0px 40px rgba(0, 0, 0, .2);
}
.panel-footer .btn:hover {
border: 1px solid #f4511e;
background-color: #fff !important;
color: #f4511e;
}
.panel-heading {
color: #fff !important;
background-color: #f4511e !important;
padding: 25px;
border-bottom: 1px solid transparent;
border-top-left-radius: 0px;
border-top-right-radius: 0px;
border-bottom-left-radius: 0px;
border-bottom-right-radius: 0px;
}
.panel-footer {
background-color: white !important;
}
.panel-footer h3 {
font-size: 32px;
font-weight: 600;
margin-bottom: 30px;
}
.panel-footer h4 {
font-size: 14px;
}
.panel-footer .btn {
margin: 15px 0;
}
.navbar {
margin-bottom: 0;
z-index: 9999;
border: 0;
font-size: 14px !important;
line-height: 1.42857143 !important;
letter-spacing: 4px;
border-radius: 0;
font-family: Montserrat, sans-serif;
}
.navbar li a,
.navbar .navbar-brand {
font-family: Montserrat, sans-serif;
}
.navbar-nav li a:hover,
.navbar-nav li.active a {
background-color: #DDD0C8 !important;
color: #323232 !important;
transition: 0.3s;
}
.navbar-default .navbar-toggle {
border-color: transparent;
color: #fff !important;
}
footer {
background-color: #ffffff;
}
footer .glyphicon {
font-size: 20px;
margin-bottom: 20px;
color: #323232a2;
}
.glyphicon {
font-size: 20px;
margin-bottom: 20px;
color: #323232a2;
}
.slideanim {
visibility: hidden;
}
.slide {
animation-name: slide;
-webkit-animation-name: slide;
animation-duration: 1s;
-webkit-animation-duration: 1s;
visibility: visible;
}
@keyframes slide {
0% {
opacity: 0;
transform: translateY(70%);
}
100% {
opacity: 1;
transform: translateY(0%);
}
}
@-webkit-keyframes slide {
0% {
opacity: 0;
-webkit-transform: translateY(70%);
}
100% {
opacity: 1;
-webkit-transform: translateY(0%);
}
}
@media screen and (max-width: 768px) {
.col-sm-4 {
text-align: center;
margin: 25px 0;
}
.btn-lg {
width: 100%;
margin-bottom: 35px;
}
}
@media screen and (max-width: 480px) {
.logo {
font-size: 150px;
}
}
.external-link-icon {
float: left;
margin: 0pt 5pt;
width: 50pt;
height: 50pt;
opacity: 1;
}
.external-link-icon img {
transition: 0.3s;
border-radius: 50%;
}
.external-link-icon img:hover {
background-color: #FFFFFF;
transition: 0.3s;
border-radius: 50%;
}
.external-link-container {
text-align: center;
width: 100%;
height: 60pt;
}
.external-link-center-wrapper {
margin: 20px auto auto;
width: 300pt;
height: 60pt;
}
.button {
background-color: #323232;
border: none;
color: white;
padding: 8px 32px;
text-align: center;
font-size: 18px;
margin: 4px 2px;
transition: 0.3s;
display: inline-block;
text-decoration: none;
cursor: pointer;
border-radius: 5px;
}
.button:hover {
background-color: #DDD0C8;
color: #323232;
}
</style>
</head>
<body id="myPage" data-spy="scroll" data-target=".navbar" data-offset="60" class="section-default">
<!-- Fixed-top navigation; Bootstrap scrollspy on <body> highlights the active section link. -->
<nav class="navbar navbar-default navbar-fixed-top section-navbar">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#myNavbar">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#myPage"><b>Jhair Gallardo</b></a>
</div>
<div class="collapse navbar-collapse" id="myNavbar">
<ul class="nav navbar-nav navbar-right">
<li><a href="#about"><b>ABOUT</b></a></li>
<li><a href="#recent"><b>NEWS</b></a></li>
<li><a href="#research"><b>RESEARCH</b></a></li>
<li><a href="#publications"><b>PUBLICATIONS</b></a></li>
<li data-toggle="collapse" data-target=".navbar-collapse.in"><a href="data/Jhair_Gallardo_CV.pdf" onclick="getOutboundLink('data/Jhair_Gallardo_CV.pdf'); return false;" target="_blank"><b>CV</b></a></li>
<!-- was a stray </h1>; the <ul> opened above was never closed -->
</ul>
</div>
</div>
</nav>
<div class="jumbotron text-center">
<div class="external-link-container">
<div class="external-link-center-wrapper">
<!-- Social / profile icons. <img> is a void element, so the stray </img> end tags
     were removed; deprecated border="0" dropped (it had no visual effect here).
     NOTE(review): getOutboundLink is assumed to be defined later in this file and
     to perform the navigation itself (the "return false;" suppresses the default) —
     confirm against the script at the bottom of the page. -->
<div class="external-link-icon">
<a href="https://linkedin.com/in/jhairgallardo" onclick="getOutboundLink('https://linkedin.com/in/jhairgallardo'); return false;" target="_blank" rel="noopener">
<span><img alt="Jhair Gallardo Linkedin" src="images/linkedin-logo.png"></span>
</a>
</div>
<div class="external-link-icon">
<a href="https://scholar.google.com/citations?user=gFQHAtQAAAAJ&amp;hl=en&amp;oi=sra" onclick="getOutboundLink('https://scholar.google.com/citations?user=gFQHAtQAAAAJ&amp;hl=en&amp;oi=sra'); return false;" target="_blank" rel="noopener">
<span><img alt="Jhair Gallardo Google Scholar" src="images/gs-logo.png"></span>
</a>
</div>
<div class="external-link-icon">
<a href="https://github.com/jhairgallardo" onclick="getOutboundLink('https://github.com/jhairgallardo'); return false;" target="_blank" rel="noopener">
<span><img alt="Jhair Gallardo GitHub" src="images/github-logo.png"></span>
</a>
</div>
<div class="external-link-icon">
<a href="https://twitter.com/jhairgallardo" onclick="getOutboundLink('https://twitter.com/jhairgallardo'); return false;" target="_blank" rel="noopener">
<span><img alt="Jhair Gallardo Twitter" src="images/twitter-logo.png"></span>
</a>
</div>
<div class="external-link-icon">
<!-- Email obfuscation: the real mailto: target is assembled only on click. -->
<a href="" rel="nofollow" onclick="this.href='mailto:' + 'gg4099' + '@' + 'rit.edu'" target="_blank">
<span><img alt="Jhair Gallardo Contact" src="images/mail-logo.png"></span>
</a>
</div>
</div>
</div>
<!-- The <h1> was previously left unclosed and followed by a stray </form>; both fixed. -->
<h1>Jhair Gallardo</h1>
</div>
<!-- Container (About Section) -->
<!-- Two-column row: bio paragraph (offset left column) + rounded portrait (right column). -->
<div id="about" class="container-fluid section-about-me">
<div class="row">
<h2>About Me</h2>
<div class="col-sm-4 col-sm-offset-3">
<h4>Hello! I am a PhD Candidate working under the supervision of <a href="https://chriskanan.com/">Dr. Christopher Kanan</a> in <a href="http://www.cis.rit.edu/">Chester F. Carlson Center for Imaging Science</a>
at <a href="https://www.rit.edu/">Rochester Institute of Technology (RIT)</a>. My research is focused on self-supervised learning, continual learning, and computer vision. I am mainly interested in efficient continual representation learning systems with minimal supervision.
Previously, I interned at <a href="https://www.siemens-healthineers.com/">Siemens Healthineers</a>, where I worked on self-supervised learning techniques for medical imaging.
</h4>
</div>
<div class="col-sm-2">
<span><img src="images/jhair.jpg" alt="Jhair Gallardo" class="img-responsive" style="border-radius: 25px"></span>
</div>
</div>
</div>
<!-- Container (News) -->
<!-- Recent items are always visible; older items sit in a Bootstrap collapse (#collapseNews). -->
<div id="recent" class="container-fluid section-recent-news">
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h2>News</h2>
<!-- Recent News -->
<!-- <h4><strong>Oct 2023:</strong> <span class="newflag">Upcoming!</span> We will host the first <a href="https://unconf.continualai.org/" onclick="getOutboundLink('https://unconf.continualai.org/'); return false;">ContinualAI Unconference</a>!</h4> -->
<h4><strong>Sep 2024:</strong> <span class="newflag">New!</span> Our paper <a href="https://arxiv.org/abs/2405.15018">"What Variables Affect Out-Of-Distribution Generalization in Pretrained Models?"</a> got accepted in NeurIPS 2024!</h4>
<h4><strong>May 2024:</strong> Our TMLR paper <a href="https://arxiv.org/abs/2303.10725">"SIESTA: Efficient Online Continual Learning with Sleep"</a> got accepted in the journal track of CoLLAs 2024!</h4>
<h4><strong>May 2024:</strong> Our paper <a href="https://edas.info/web/sose2024/showManuscript.php?m=1571024835&amp;type=final&amp;ext=pdf&amp;title=PDF+file">"Human Emotion Estimation through Physiological Data with Neural Networks"</a> got accepted in SoSE 2024!</h4>
<h4><strong>Apr 2024:</strong> Our paper <a href="https://arxiv.org/abs/2308.13646">"GRASP: A Rehearsal Policy for Efficient Online Continual Learning"</a> got accepted in CoLLAs 2024!</h4>
<h4><strong>Oct 2023:</strong> Our paper <a href="https://arxiv.org/abs/2303.10725">"SIESTA: Efficient Online Continual Learning with Sleep"</a> got accepted in Transactions on Machine Learning Research (TMLR)!</h4>
<h4><strong>Aug 2023:</strong> Joined <a href="https://www.siemens-healthineers.com/">Siemens Healthineers</a> as an Image Analytics Intern working on self-supervised learning for medical imaging.</h4>
<h4><strong>Apr 2023:</strong> Our paper <a href="https://arxiv.org/abs/2303.18171">"How Efficient Are Today's Continual Learning Algorithms?"</a> got accepted in the CLVISION Workshop at CVPR 2023!</h4>
<h4><strong>Oct 2022:</strong> Gave an invited talk about <a href="https://www.youtube.com/watch?v=y989UzWQCL0">"Classifying Images By Combining Self-Supervised and Continual Learning"</a> as part of the <a href="https://www.rit.edu/chai/">Center for Human-aware AI (CHAI)</a> seminars</h4>
<h4><strong>Jan 2022:</strong> Got accepted in the <a href="https://www.rit.edu/nrtai/">AWARE-AI NRT</a> program from RIT as a Trainee!</h4>
<h4><strong>Oct 2021:</strong> Our paper <a href="https://arxiv.org/abs/2103.14010">"Self-Supervised Training Enhances Online Continual Learning"</a> got accepted for poster presentation at BMVC 2021! (36.21% acceptance rate)</h4>
</div>
<!-- Collapsed News. href="#collapseNews" makes the toggle keyboard-focusable; Bootstrap's
     collapse plugin prevents the default jump (canonical Bootstrap 3 anchor pattern). -->
<a class="col-sm-8 col-sm-offset-2 text-center" data-toggle="collapse" href="#collapseNews" role="button" aria-expanded="false" aria-controls="collapseNews"><span class="glyphicon glyphicon-chevron-down"></span></a>
<div class="col-sm-8 col-sm-offset-2 collapse" id="collapseNews">
<h4><strong>Apr 2021:</strong> Gave an invited talk at the <a href="https://www.continualai.org/">Continual AI</a> Reading Group about our paper <a href="https://arxiv.org/abs/2103.14010">"Self-Supervised Training Enhances Online Continual Learning"</a></h4>
<h4><strong>Apr 2020:</strong> Joined the <a href="http://klab.cis.rit.edu/">Machine and Neuromorphic Perception Laboratory</a> as a PhD student working on self-supervised learning, and lifelong machine learning.</h4>
<h4><strong>Apr 2019:</strong> Got admitted to the <a href="https://www.rit.edu/">Rochester Institute of Technology</a> Imaging Science Ph.D. program!</h4>
<h4><strong>May 2018:</strong> Joined <a href="https://www.everis.com/global/en">Everis</a> as a Machine Learning Engineer working on recommendation systems, image classification, object detection, and tracking.</h4>
<h4><strong>Apr 2017:</strong> Joined <a href="https://www.siemens-healthineers.com/">Siemens Healthineers</a> as a Research Intern working on lung cancer detection.</h4>
<h4><strong>Dec 2015:</strong> Obtained my BS in Mechatronics Engineering from <a href="https://www.uni.edu.pe/">Universidad Nacional de Ingeniería</a> in Lima, Peru.</h4>
</div>
</div>
</div>
<!-- Container (Research Section) -->
<div id="research" class="container-fluid section-research">
<div class="row">
<h2>Research</h2>
</div>
<!-- arXiv 2024 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>NeurIPS 2024</strong>: What Variables Affect Out-Of-Distribution Generalization in Pretrained Models?</h3>
<h4>Md Yousuf Harun, Kyungbok Lee, <strong>Jhair Gallardo</strong>, Giri Krishnan, Christopher Kanan</h4>
<h3>
<!-- <button> must not be nested inside <a>; the .button class now styles the link itself. -->
<a class="button" href="https://arxiv.org/abs/2405.15018" onclick="getOutboundLink('https://arxiv.org/abs/2405.15018'); return false;" target="_blank" rel="noopener">arXiv</a>
<a class="button" href="https://yousuf907.github.io/oodg/" onclick="getOutboundLink('https://yousuf907.github.io/oodg/'); return false;" target="_blank" rel="noopener">Project Page</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/the_tunnel_effect_updated.png" alt="What Variables Affect Out-Of-Distribution Generalization in Pretrained Models?" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>Embeddings produced by pre-trained deep neural networks (DNNs) are widely used; however, their efficacy for downstream tasks can vary widely.
We study the factors influencing out-of-distribution (OOD) generalization of pre-trained DNN embeddings through the lens of the tunnel effect hypothesis, which
suggests deeper DNN layers compress representations and hinder OOD performance. Contrary to earlier work, we find the tunnel effect is not universal. Based on
10,584 linear probes, we study the conditions that mitigate the tunnel effect by varying DNN architecture, training dataset, image resolution, and augmentations.
We quantify each variable's impact using a novel SHAP analysis. Our results emphasize the danger of generalizing findings from toy datasets to broader contexts.</p></h5>
</div>
</div>
<!-- SoSE 2024 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>SoSE 2024</strong>: Human Emotion Estimation through Physiological Data with Neural Networks</h3>
<h4><strong>Jhair Gallardo</strong>, Celal Savur, Ferat Sahin, Christopher Kanan</h4>
<h3>
<!-- Raw "&" in the query string escaped as "&amp;"; button-in-anchor replaced by a styled link. -->
<a class="button" href="https://edas.info/web/sose2024/showManuscript.php?m=1571024835&amp;type=final&amp;ext=pdf&amp;title=PDF+file" onclick="getOutboundLink('https://edas.info/web/sose2024/showManuscript.php?m=1571024835&amp;type=final&amp;ext=pdf&amp;title=PDF+file'); return false;" target="_blank" rel="noopener">Paper</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/sose2024.png" alt="Human Emotion Estimation through Physiological Data with Neural Networks" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>Effective collaboration between humans and robots necessitates that the robotic partner can perceive, learn from, and respond to the human's
psycho-physiological conditions. This involves understanding the emotional states of the human collaborator. To explore this, we collected subjective assessments — specifically,
feelings of surprise, anxiety, boredom, calmness, and comfort — as well as physiological signals during a dynamic
human-robot interaction experiment. The experiment manipulated the robot's behavior to observe these responses. We gathered data from this non-stationary setting
and trained an artificial neural network model to predict human emotion from physiological data. We found that using several subjects' data to train a general model
and then fine-tuning it on the subject of interest performs better than training a model only using the subject of interest data.</p></h5>
</div>
</div>
<!-- CoLLAs 2024 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>CoLLAs 2024</strong>: GRASP: A Rehearsal Policy for Efficient Online Continual Learning</h3>
<h4>Md Yousuf Harun, <strong>Jhair Gallardo</strong>, Christopher Kanan</h4>
<h3>
<!-- Button-in-anchor is invalid HTML; the .button class styles the links directly. -->
<a class="button" href="https://arxiv.org/abs/2308.13646" onclick="getOutboundLink('https://arxiv.org/abs/2308.13646'); return false;" target="_blank" rel="noopener">arXiv</a>
<a class="button" href="https://drive.google.com/file/d/1Q-KITER5OXKhF-JHY4OshqlNxeVfbcnP/view" onclick="getOutboundLink('https://drive.google.com/file/d/1Q-KITER5OXKhF-JHY4OshqlNxeVfbcnP/view'); return false;" target="_blank" rel="noopener">Paper</a>
<a class="button" href="https://yousuf907.github.io/graspsite" onclick="getOutboundLink('https://yousuf907.github.io/graspsite'); return false;" target="_blank" rel="noopener">Project Page</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/grasp.PNG" alt="GRASP: A Rehearsal Policy for Efficient Online Continual Learning" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>Continual learning (CL) in deep neural networks (DNNs) involves incrementally accumulating knowledge in a DNN from a growing data stream. A major challenge in CL is that non-stationary data streams cause catastrophic forgetting of previously learned abilities. Rehearsal is a popular and effective way to mitigate this problem, which is storing past observations in a buffer and mixing them with new observations during learning. This leads to a question: Which stored samples should be selected for rehearsal? Choosing samples that are best for learning, rather than simply selecting them at random, could lead to significantly faster learning. For class incremental learning, prior work has shown that a simple class balanced random selection policy outperforms more sophisticated methods. Here, we revisit this question by exploring a new sample selection policy called GRASP. GRASP selects the most prototypical (class representative) samples first and then gradually selects less prototypical (harder) examples to update the DNN. GRASP has little additional compute or memory overhead compared to uniform selection, enabling it to scale to large datasets. We evaluate GRASP and other policies by conducting CL experiments on the large-scale ImageNet-1K and Places-LT image classification datasets. GRASP outperforms all other rehearsal policies. Beyond vision, we also demonstrate that GRASP is effective for CL on five text classification datasets.</p></h5>
</div>
</div>
<!-- TMLR 2023 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>TMLR 2023</strong>: SIESTA: Efficient Online Continual Learning with Sleep</h3>
<h4>Md Yousuf Harun*, <strong>Jhair Gallardo</strong>*, Tyler L. Hayes, Ronald Kemker, Christopher Kanan</h4>
<h6>* denotes equal contribution.</h6>
<h3>
<!-- Button-in-anchor is invalid HTML; the .button class styles the links directly. -->
<a class="button" href="https://arxiv.org/abs/2303.10725" onclick="getOutboundLink('https://arxiv.org/abs/2303.10725'); return false;" target="_blank" rel="noopener">arXiv</a>
<a class="button" href="https://openreview.net/pdf?id=MqDVlBWRRV" onclick="getOutboundLink('https://openreview.net/pdf?id=MqDVlBWRRV'); return false;" target="_blank" rel="noopener">Paper</a>
<a class="button" href="https://yousuf907.github.io/siestasite/" onclick="getOutboundLink('https://yousuf907.github.io/siestasite/'); return false;" target="_blank" rel="noopener">Project Page</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/siesta.png" alt="SIESTA: Efficient Online Continual Learning with Sleep" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>In supervised continual learning, a deep neural network (DNN) is updated with an ever-growing data stream. Unlike the offline setting where data is shuffled, we cannot make any distributional assumptions about the data stream. Ideally, only one pass through the dataset is needed for computational efficiency. However, existing methods are inadequate and make many assumptions that cannot be
made for real-world applications, while simultaneously failing to improve computational efficiency.
In this paper, we propose a novel online continual learning method, SIESTA based on wake/sleep
framework for training, which is well aligned to the needs of on-device learning. The major goal of
SIESTA is to advance compute efficient continual learning so that DNNs can be updated efficiently
using far less time and energy. The principal innovations of SIESTA are: 1) rapid online updates
using a rehearsal-free, backpropagation-free, and data-driven network update rule during its wake
phase, and 2) expedited memory consolidation using a compute-restricted rehearsal policy during its
sleep phase. For memory efficiency, SIESTA adapts latent rehearsal using memory indexing from
REMIND. Compared to REMIND and prior arts, SIESTA is far more computationally efficient, enabling continual learning on ImageNet-1K in under 2.4 hours on a single GPU; moreover, in the
augmentation-free setting it matches the performance of the offline learner, a milestone critical to
driving adoption of continual learning in real-world applications.</p></h5>
</div>
</div>
<!-- CVPRW 2023 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>CVPRW 2023</strong>: How Efficient Are Today's Continual Learning Algorithms?</h3>
<h4>Md Yousuf Harun, <strong>Jhair Gallardo</strong>, Tyler L. Hayes, Christopher Kanan</h4>
<h3>
<!-- Button-in-anchor is invalid HTML; the .button class styles the links directly. -->
<a class="button" href="https://arxiv.org/abs/2303.18171" onclick="getOutboundLink('https://arxiv.org/abs/2303.18171'); return false;" target="_blank" rel="noopener">arXiv</a>
<a class="button" href="https://openaccess.thecvf.com/content/CVPR2023W/CLVision/papers/Harun_How_Efficient_Are_Todays_Continual_Learning_Algorithms_CVPRW_2023_paper.pdf" onclick="getOutboundLink('https://openaccess.thecvf.com/content/CVPR2023W/CLVision/papers/Harun_How_Efficient_Are_Todays_Continual_Learning_Algorithms_CVPRW_2023_paper.pdf'); return false;" target="_blank" rel="noopener">Paper</a>
<a class="button" href="https://yousuf907.github.io/files/Poster_CVPRW_2023.pdf" onclick="getOutboundLink('https://yousuf907.github.io/files/Poster_CVPRW_2023.pdf'); return false;" target="_blank" rel="noopener">Poster</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/cvprw_2023.PNG" alt="How Efficient Are Today's Continual Learning Algorithms?" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>Supervised Continual learning involves updating a deep neural network (DNN) from an ever-growing stream of labeled data. While most work has focused on overcoming catastrophic forgetting, one of the major motivations behind continual learning is being able to efficiently update a network with new information, rather than retraining from scratch on the training dataset as it grows over time. Despite recent continual learning methods largely solving the catastrophic forgetting problem, there has been little attention paid to the efficiency of these algorithms. Here, we study recent methods for incremental class learning and illustrate that many are highly inefficient in terms of compute, memory, and storage. Some methods even require more compute than training from scratch! We argue that for continual learning to have real-world applicability, the research community cannot ignore the resources used by these algorithms. There is more to continual learning than mitigating catastrophic forgetting.</p></h5>
</div>
</div>
<!-- BMVC 2021 -->
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h3><strong>BMVC 2021</strong>: Self-Supervised Training Enhances Online Continual Learning</h3>
<h4><strong>Jhair Gallardo</strong>, Tyler L. Hayes, Christopher Kanan</h4>
<h3>
<!-- Button-in-anchor is invalid HTML; the .button class styles the links directly. -->
<a class="button" href="https://arxiv.org/abs/2103.14010" onclick="getOutboundLink('https://arxiv.org/abs/2103.14010'); return false;" target="_blank" rel="noopener">arXiv</a>
<a class="button" href="https://www.bmvc2021-virtualconference.com/assets/papers/0636.pdf" onclick="getOutboundLink('https://www.bmvc2021-virtualconference.com/assets/papers/0636.pdf'); return false;" target="_blank" rel="noopener">Paper</a>
</h3>
</div>
</div>
<div class="row">
<div class="col-sm-2 col-sm-offset-2">
<img src="images/self-supervised-cl.png" alt="Self-Supervised Training Enhances Online Continual Learning" class="img-responsive">
</div>
<div class="col-sm-6">
<h5 class="h5-small"><p>In continual learning, a system must incrementally learn from a non-stationary data stream without catastrophic forgetting. Recently, multiple methods have been devised for incrementally learning classes on large-scale image classification tasks, such as ImageNet. State-of-the-art continual learning methods use an initial supervised pre-training phase, in which the first 10% - 50% of the classes in a dataset are used to learn representations in an offline manner before continual learning of new classes begins. We hypothesize that self-supervised pre-training could yield features that generalize better than supervised learning, especially when the number of samples used for pre-training is small. We test this hypothesis using the self-supervised MoCo-V2 and SwAV algorithms. On ImageNet, we find that both outperform supervised pre-training considerably for online continual learning, and the gains are larger when fewer samples are available. Our findings are consistent across three continual learning algorithms. Our best system achieves a 14.95% relative increase in top-1 accuracy on class incremental ImageNet over the prior state of the art for online continual learning.</p></h5>
</div>
</div>
</div>
<!-- Container (Publications) -->
<div id="publications" class="container-fluid section-publications">
<div class="row">
<div class="col-sm-8 col-sm-offset-2">
<h2>Publications</h2>
<h4>Peer-Reviewed Papers</h4>
<h5><ul style="list-style-type:circle">
<li>M.Y. Harun, K. Lee, <strong>J. Gallardo</strong>, G. Krishnan, and C. Kanan. <a href="https://arxiv.org/abs/2405.15018" onclick="getOutboundLink('https://arxiv.org/abs/2405.15018'); return false;">What Variables Affect Out-Of-Distribution Generalization in Pretrained Models?</a>. In: Conference on Neural Information Processing Systems (NeurIPS), 2024</li>
<li><strong>J. Gallardo</strong>, C. Savur, F. Sahin, and C. Kanan. <a href="https://edas.info/web/sose2024/showManuscript.php?m=1571024835&type=final&ext=pdf&title=PDF+file" onclick="getOutboundLink('https://edas.info/web/sose2024/showManuscript.php?m=1571024835&type=final&ext=pdf&title=PDF+file'); return false;">Human Emotion Estimation through Physiological Data with Neural Networks</a>. In: System of Systems Engineering Conference (SoSE), 2024</li>
<li>M.Y. Harun, <strong>J. Gallardo</strong>, and C. Kanan. <a href="https://arxiv.org/abs/2308.13646" onclick="getOutboundLink('https://arxiv.org/abs/2308.13646'); return false;">GRASP: A Rehearsal Policy for Efficient Online Continual Learning</a>. In: Conference on Lifelong Learning Agents (CoLLAs), 2024</li>
<li>M.Y. Harun, <strong>J. Gallardo</strong>, T.L. Hayes, R. Kemker, and C. Kanan. <a href="https://arxiv.org/abs/2303.10725" onclick="getOutboundLink('https://arxiv.org/abs/2303.10725'); return false;">SIESTA: Efficient online continual learning with sleep</a>. Transactions on Machine Learning Research (TMLR), 2023</li>
                    <li>M.Y. Harun, <strong>J. Gallardo</strong>, T.L. Hayes, and C. Kanan. <a href="https://arxiv.org/abs/2303.18171" onclick="getOutboundLink('https://arxiv.org/abs/2303.18171'); return false;">How efficient are today's continual learning algorithms?</a>. CVPR Workshop: Continual Learning in Computer Vision, 2023</li>
<li>I. Sur, Z. Daniels, A. Rahman, K. Faber, <strong>J. Gallardo</strong>, T.L. Hayes, C.E. Taylor, M.B. Gurbuz, J. Smith, S. Joshi, N. Japkowicz, M. Baron, Z. Kira, C. Kanan, R. Corizzo, A. Divakaran, M. Piacentino, J. Hostetler, and A. Raghavan. <a href="https://arxiv.org/abs/2212.04603" onclick="getOutboundLink('https://arxiv.org/abs/2212.04603'); return false;">System design for an integrated lifelong reinforcement learning agent for real-time strategy games</a>. In: International Conference on AI-ML Systems, 2022</li>
<li><strong>J. Gallardo</strong>, T.L. Hayes, and C. Kanan. <a href="https://arxiv.org/abs/2103.14010" onclick="getOutboundLink('https://arxiv.org/abs/2103.14010'); return false;">Self-supervised training enhances online continual learning</a>. In: British Machine Vision Conference (BMVC), 2021</li>
<li>G. Garcia, <strong>J. Gallardo</strong>, A. Mauricio, J. Lopez, and C. del Carpio. <a href="https://link.springer.com/chapter/10.1007/978-3-319-68612-7_72" onclick="getOutboundLink('https://link.springer.com/chapter/10.1007/978-3-319-68612-7_72'); return false;">Detection of Diabetic Retinopathy Based on a Convolutional Neural Network Using Retinal Fundus Images</a>. In: International Conference on Artificial Neural Networks (ICANN), 2017</li>
</ul></h5>
<!-- <h4>Pre-Prints</h4>
<h5><ul style="list-style-type:circle">
<li>M.Y. Harun, K. Lee, <strong>J. Gallardo</strong>, G. Krishnan, and C. Kanan. <a href="https://arxiv.org/abs/2405.15018" onclick="getOutboundLink('https://arxiv.org/abs/2405.15018'); return false;">What Variables Affect Out-Of-Distribution Generalization in Pretrained Models?</a>. 2024</li>
</ul></h5> -->
</div>
</div>
</div>
<footer class="container-fluid text-center">
<a href="#myPage" title="To Top">
<span class="glyphicon glyphicon-chevron-up"></span>
</a>
</footer>
<script>
$(document).ready(function(){
// Add smooth scrolling to all links in navbar + footer link
$(".navbar a, footer a[href='#myPage']").on('click', function(event) {
// Make sure this.hash has a value before overriding default behavior
if (this.hash !== "") {
// Prevent default anchor click behavior
event.preventDefault();
// Store hash
var hash = this.hash;
// Using jQuery's animate() method to add smooth page scroll
// The optional number (900) specifies the number of milliseconds it takes to scroll to the specified area
$('html, body').animate({
scrollTop: $(hash).offset().top
}, 900, function(){
// Add hash (#) to URL when done scrolling (default click behavior)
window.location.hash = hash;
});
} // End if
});
$(window).scroll(function() {
$(".slideanim").each(function(){
var pos = $(this).offset().top;
var winTop = $(window).scrollTop();
if (pos < winTop + 600) {
$(this).addClass("slide");
}
});
});
})
</script>
</body>
</html>