@online{slowboring,
author = {Matthew Yglesias},
title = {ChatGPT Goes to Harvard},
year = {2023},
url = {https://www.slowboring.com/p/chatgpt-goes-to-harvard}
}
@article{chronicle-higher-ed,
author = {Terry, O.},
title = {I'm a Student: You Have No Idea How Much We're Using ChatGPT},
journal = {The Chronicle of Higher Education},
year = {2023},
url = {https://www.chronicle.com/article/im-a-student-you-have-no-idea-how-much-were-using-chatgpt}
}
@article{washingtonpost-opinions,
author = {Roberts, M.},
title = {AI is forcing teachers to confront an existential question},
journal = {The Washington Post},
year = {2023},
url = {https://www.washingtonpost.com/opinions/2023/12/12/ai-chatgpt-universities-learning/}
}
@online{nytimes-technology1,
title = {Despite Cheating Fears, Schools Repeal ChatGPT Bans},
author = {Singer, N.},
year = {2023},
url = {https://www.nytimes.com/2023/08/24/business/schools-chatgpt-chatbot-bans.html}
}
@online{mississippi-ai-blog,
title = {AI Should Revolutionize Teaching, But Not in the Way You Think},
author = {Donahue, E.},
year = {2023},
url = {https://blog.mississippi.ai/ai-should-revolutionize-teaching-but-not-in-the-way-you-think}
}
@online{nytimes-technology2,
title = {Alarmed by A.I. Chatbots, Universities Start Revamping How They Teach},
author = {Huang, K.},
year = {2023},
url = {https://www.nytimes.com/2023/01/16/technology/chatgpt-artificial-intelligence-universities.html}
}
@online{nytimes-technology3,
title = {At This School, Computer Science Class Now Includes Critiquing Chatbots},
author = {Singer, N.},
year = {2023},
url = {https://www.nytimes.com/2023/02/06/technology/chatgpt-schools-teachers-ai-ethics.html}
}
@online{omfif2023,
author = {{OMFIF}},
title = {How Central Banks Are Already Deploying Artificial Intelligence},
year = {2023},
month = {September},
url = {https://www.omfif.org/2023/09/how-central-banks-are-already-deploying-artificial-intelligence/},
}
@techreport{stlouisfed2023,
author = {{Federal Reserve Bank of St. Louis}},
title = {Monetary Policy and Economic Performance since the Global Financial Crisis},
institution = {Working Paper Series},
number = {2023-015},
year = {2023},
url = {https://research.stlouisfed.org/wp/more/2023-015},
}
@article{nelson2023,
author = {Nelson, E.},
title = {European Central Bank Moves Toward Embracing Artificial Intelligence},
journal = {The New York Times},
year = {2023},
month = {September 28},
url = {https://www.nytimes.com/2023/09/28/business/european-central-bank-artificial-intelligence.html},
}
@online{moufakkir2023,
author = {Moufakkir, M.},
title = {Understanding and Guiding Artificial Intelligence in Central Banking},
howpublished = {European Central Bank Blog},
year = {2023},
month = {September 28},
url = {https://www.ecb.europa.eu/press/blog/date/2023/html/ecb.blog230928~3f76d57cce.en.html},
}
@Manual{rmarkdown2021,
title = {rmarkdown: Dynamic Documents for R},
author = {JJ Allaire and Yihui Xie and Jonathan McPherson and Javier Luraschi and Kevin Ushey and Aron Atkins and Hadley Wickham and Joe Cheng and Winston Chang and Richard Iannone},
year = {2021},
note = {R package version 2.10},
url = {https://github.com/rstudio/rmarkdown},
}
@Book{Xie2018,
title = {R Markdown: The Definitive Guide},
author = {Yihui Xie and J.J. Allaire and Garrett Grolemund},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2018},
note = {ISBN 9781138359338},
url = {https://bookdown.org/yihui/rmarkdown},
}
@Book{Xie2020,
title = {R Markdown Cookbook},
author = {Yihui Xie and Christophe Dervieux and Emily Riederer},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2020},
note = {ISBN 9780367563837},
url = {https://bookdown.org/yihui/rmarkdown-cookbook},
}
@ARTICLE{Mattson2014,
AUTHOR={Mattson, Mark P.},
TITLE={Superior pattern processing is the essence of the evolved human brain},
JOURNAL={Frontiers in Neuroscience},
VOLUME={8},
YEAR={2014},
URL={https://www.frontiersin.org/articles/10.3389/fnins.2014.00265},
DOI={10.3389/fnins.2014.00265},
ISSN={1662-453X},
ABSTRACT={Humans have long pondered the nature of their mind/brain and, particularly why its capacities for reasoning, communication and abstract thought are far superior to other species, including closely related anthropoids. This article considers superior pattern processing (SPP) as the fundamental basis of most, if not all, unique features of the human brain including intelligence, language, imagination, invention, and the belief in imaginary entities such as ghosts and gods. SPP involves the electrochemical, neuronal network-based, encoding, integration, and transfer to other individuals of perceived or mentally-fabricated patterns. During human evolution, pattern processing capabilities became increasingly sophisticated as the result of expansion of the cerebral cortex, particularly the prefrontal cortex and regions involved in processing of images. Specific patterns, real or imagined, are reinforced by emotional experiences, indoctrination and even psychedelic drugs. Impaired or dysregulated SPP is fundamental to cognitive and psychiatric disorders. A broader understanding of SPP mechanisms, and their roles in normal and abnormal function of the human brain, may enable the development of interventions that reduce irrational decisions and destructive behaviors.}
}
@article{belenguer_ai_2022,
title = {{AI} bias: exploring discriminatory algorithmic decision-making models and the application of possible machine-centric solutions adapted from the pharmaceutical industry},
volume = {2},
issn = {2730-5953},
shorttitle = {{AI} bias},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8830968/},
doi = {10.1007/s43681-022-00138-8},
abstract = {A new and unorthodox approach to deal with discriminatory bias in Artificial Intelligence is needed. As it is explored in detail, the current literature is a dichotomy with studies originating from the contrasting fields of study of either philosophy and sociology or data science and programming. It is suggested that there is a need instead for an integration of both academic approaches, and needs to be machine-centric rather than human-centric applied with a deep understanding of societal and individual prejudices. This article is a novel approach developed into a framework of action: a bias impact assessment to raise awareness of bias and why, a clear set of methodologies as shown in a table comparing with the four stages of pharmaceutical trials, and a summary flowchart. Finally, this study concludes the need for a transnational independent body with enough power to guarantee the implementation of those solutions.},
number = {4},
urldate = {2023-05-02},
journal = {Ai and Ethics},
author = {Belenguer, Lorenzo},
year = {2022},
pmid = {35194591},
pmcid = {PMC8830968},
pages = {771--787}
}
@misc{AI_paternalism,
title = {Artificial intelligence is infiltrating health care. {We} shouldn’t let it make all the decisions.},
url = {https://www.technologyreview.com/2023/04/21/1071921/ai-is-infiltrating-health-care-we-shouldnt-let-it-make-decisions/},
abstract = {AI paternalism could put patient autonomy at risk—if we let it.},
language = {en},
author = {Jessica Hamzelou},
urldate = {2023-05-08},
journal = {MIT Technology Review},
}
@misc{kowaleski_can_2019,
address = {Rochester, NY},
type = {{SSRN} {Scholarly} {Paper}},
title = {Can {Ethics} be {Taught}? {Evidence} from {Securities} {Exams} and {Investment} {Adviser} {Misconduct}},
shorttitle = {Can {Ethics} be {Taught}?},
url = {https://papers.ssrn.com/abstract=3457588},
doi = {10.2139/ssrn.3457588},
abstract = {We study the consequences of a 2010 change in the investment adviser qualification exam that reallocated coverage from the rules and ethics section to the technical material section. Comparing advisers with the same employer in the same location and year, we find those passing the exam with more rules and ethics coverage are one-fourth less likely to commit misconduct. The exam change appears to affect advisers’ perception of acceptable conduct, and not just their awareness of specific rules or selection into the qualification. Those passing the rules and ethics-focused exam are more likely to depart employers experiencing scandals. Such departures also predict future scandals. Our paper offers the first archival evidence on how rules and ethics training affects conduct and labor market activity in the financial sector.},
language = {en},
urldate = {2023-12-12},
author = {Kowaleski, Zachary T. and Sutherland, Andrew and Vetter, Felix},
month = sep,
year = {2019},
keywords = {compliance training, ethics, ethics training, financial misconduct, financial regulation, fraud, investment advisers, labor economics},
}
@article{giorgini_researcher_2015,
title = {Researcher {Perceptions} of {Ethical} {Guidelines} and {Codes} of {Conduct}},
volume = {22},
issn = {0898-9621},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4313573/},
doi = {10.1080/08989621.2014.955607},
abstract = {Ethical codes of conduct exist in almost every profession. Field-specific codes of conduct have been around for decades, each articulating specific ethical and professional guidelines. However, there has been little empirical research on researchers’ perceptions of these codes of conduct. In the present study, we interviewed faculty members in six research disciplines and identified five themes bearing on the circumstances under which they use ethical guidelines and the underlying reasons for not adhering to such guidelines. We then identify problems with the manner in which codes of conduct in academia are constructed and offer solutions for overcoming these problems.},
number = {3},
urldate = {2023-12-12},
journal = {Accountability in research},
author = {Giorgini, Vincent and Mecca, Jensen T. and Gibson, Carter and Medeiros, Kelsey and Mumford, Michael D. and Connelly, Shane and Devenport, Lynn D.},
year = {2015},
pmid = {25635845},
pmcid = {PMC4313573},
pages = {123--138},
}
@article{pethig_biased_2023,
title = {Biased {Humans}, ({Un}){Biased} {Algorithms}?},
volume = {183},
issn = {1573-0697},
url = {https://doi.org/10.1007/s10551-022-05071-8},
doi = {10.1007/s10551-022-05071-8},
abstract = {Previous research has shown that algorithmic decisions can reflect gender bias. The increasingly widespread utilization of algorithms in critical decision-making domains (e.g., healthcare or hiring) can thus lead to broad and structural disadvantages for women. However, women often experience bias and discrimination through human decisions and may turn to algorithms in the hope of receiving neutral and objective evaluations. Across three studies (N = 1107), we examine whether women’s receptivity to algorithms is affected by situations in which they believe that their gender identity might disadvantage them in an evaluation process. In Study 1, we establish, in an incentive-compatible online setting, that unemployed women are more likely to choose to have their employment chances evaluated by an algorithm if the alternative is an evaluation by a man rather than a woman. Study 2 generalizes this effect by placing it in a hypothetical hiring context, and Study 3 proposes that relative algorithmic objectivity, i.e., the perceived objectivity of an algorithmic evaluator over and against a human evaluator, is a driver of women’s preferences for evaluations by algorithms as opposed to men. Our work sheds light on how women make sense of algorithms in stereotype-relevant domains and exemplifies the need to provide education for those at risk of being adversely affected by algorithmic decisions. Our results have implications for the ethical management of algorithms in evaluation settings. We advocate for improving algorithmic literacy so that evaluators and evaluatees (e.g., hiring managers and job applicants) can acquire the abilities required to reflect critically on algorithmic decisions.},
language = {en},
number = {3},
urldate = {2023-12-13},
journal = {Journal of Business Ethics},
author = {Pethig, Florian and Kroenung, Julia},
month = mar,
year = {2023},
keywords = {Algorithms, Gender bias, Objectivity, Stigma},
pages = {637--652},
}
@article{dastin_insight_2018,
chapter = {World},
title = {Insight - {Amazon} scraps secret {AI} recruiting tool that showed bias against women},
url = {https://www.reuters.com/article/idUSKCN1MK0AG/},
abstract = {Amazon.com Inc's \<AMZN.O\> machine-learning specialists uncovered a big problem: their new recruiting engine did not like women.},
language = {en-US},
urldate = {2023-12-13},
journal = {Reuters},
author = {Dastin, Jeffrey},
month = oct,
year = {2018},
}
@article{gichoya_ai_2022,
title = {{AI} recognition of patient race in medical imaging: a modelling study},
volume = {4},
issn = {2589-7500},
shorttitle = {{AI} recognition of patient race in medical imaging},
url = {https://www.thelancet.com/journals/landig/article/PIIS2589-7500(22)00063-2/fulltext},
doi = {10.1016/S2589-7500(22)00063-2},
language = {English},
number = {6},
urldate = {2023-12-13},
journal = {The Lancet Digital Health},
author = {Gichoya, Judy Wawira and Banerjee, Imon and Bhimireddy, Ananth Reddy and Burns, John L. and Celi, Leo Anthony and Chen, Li-Ching and Correa, Ramon and Dullerud, Natalie and Ghassemi, Marzyeh and Huang, Shih-Cheng and Kuo, Po-Chih and Lungren, Matthew P. and Palmer, Lyle J. and Price, Brandon J. and Purkayastha, Saptarshi and Pyrros, Ayis T. and Oakden-Rayner, Lauren and Okechukwu, Chima and Seyyed-Kalantari, Laleh and Trivedi, Hari and Wang, Ryan and Zaiman, Zachary and Zhang, Haoran},
month = jun,
year = {2022},
pmid = {35568690},
note = {Publisher: Elsevier},
pages = {e406--e414},
}
@article{seyyed-kalantari_underdiagnosis_2021,
title = {Underdiagnosis bias of artificial intelligence algorithms applied to chest radiographs in under-served patient populations},
volume = {27},
copyright = {2021 The Author(s)},
issn = {1546-170X},
url = {https://www.nature.com/articles/s41591-021-01595-0},
doi = {10.1038/s41591-021-01595-0},
abstract = {Artificial intelligence (AI) systems have increasingly achieved expert-level performance in medical imaging applications. However, there is growing concern that such AI systems may reflect and amplify human bias, and reduce the quality of their performance in historically under-served populations such as female patients, Black patients, or patients of low socioeconomic status. Such biases are especially troubling in the context of underdiagnosis, whereby the AI algorithm would inaccurately label an individual with a disease as healthy, potentially delaying access to care. Here, we examine algorithmic underdiagnosis in chest X-ray pathology classification across three large chest X-ray datasets, as well as one multi-source dataset. We find that classifiers produced using state-of-the-art computer vision techniques consistently and selectively underdiagnosed under-served patient populations and that the underdiagnosis rate was higher for intersectional under-served subpopulations, for example, Hispanic female patients. Deployment of AI systems using medical imaging for disease diagnosis with such biases risks exacerbation of existing care biases and can potentially lead to unequal access to medical treatment, thereby raising ethical concerns for the use of these models in the clinic.},
language = {en},
number = {12},
urldate = {2023-12-13},
journal = {Nature Medicine},
author = {Seyyed-Kalantari, Laleh and Zhang, Haoran and McDermott, Matthew B. A. and Chen, Irene Y. and Ghassemi, Marzyeh},
month = dec,
year = {2021},
note = {Number: 12
Publisher: Nature Publishing Group},
keywords = {Machine learning, Medical imaging},
pages = {2176--2182},
}
@article{ricci_lara_addressing_2022,
title = {Addressing fairness in artificial intelligence for medical imaging},
volume = {13},
copyright = {2022 The Author(s)},
issn = {2041-1723},
url = {https://www.nature.com/articles/s41467-022-32186-3},
doi = {10.1038/s41467-022-32186-3},
abstract = {A plethora of work has shown that AI systems can systematically and unfairly be biased against certain populations in multiple scenarios. The field of medical imaging, where AI systems are beginning to be increasingly adopted, is no exception. Here we discuss the meaning of fairness in this area and comment on the potential sources of biases, as well as the strategies available to mitigate them. Finally, we analyze the current state of the field, identifying strengths and highlighting areas of vacancy, challenges and opportunities that lie ahead.},
language = {en},
number = {1},
urldate = {2023-12-13},
journal = {Nature Communications},
author = {Ricci Lara, María Agustina and Echeveste, Rodrigo and Ferrante, Enzo},
month = aug,
year = {2022},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {Image processing, Machine learning},
pages = {4581},
}
@misc{puyol-anton_fairness_2021,
title = {Fairness in {Cardiac} {MR} {Image} {Analysis}: {An} {Investigation} of {Bias} {Due} to {Data} {Imbalance} in {Deep} {Learning} {Based} {Segmentation}},
shorttitle = {Fairness in {Cardiac} {MR} {Image} {Analysis}},
url = {http://arxiv.org/abs/2106.12387},
doi = {10.48550/arXiv.2106.12387},
abstract = {The subject of "fairness" in artificial intelligence (AI) refers to assessing AI algorithms for potential bias based on demographic characteristics such as race and gender, and the development of algorithms to address this bias. Most applications to date have been in computer vision, although some work in healthcare has started to emerge. The use of deep learning (DL) in cardiac MR segmentation has led to impressive results in recent years, and such techniques are starting to be translated into clinical practice. However, no work has yet investigated the fairness of such models. In this work, we perform such an analysis for racial/gender groups, focusing on the problem of training data imbalance, using a nnU-Net model trained and evaluated on cine short axis cardiac MR data from the UK Biobank dataset, consisting of 5,903 subjects from 6 different racial groups. We find statistically significant differences in Dice performance between different racial groups. To reduce the racial bias, we investigated three strategies: (1) stratified batch sampling, in which batch sampling is stratified to ensure balance between racial groups; (2) fair meta-learning for segmentation, in which a DL classifier is trained to classify race and jointly optimized with the segmentation model; and (3) protected group models, in which a different segmentation model is trained for each racial group. We also compared the results to the scenario where we have a perfectly balanced database. To assess fairness we used the standard deviation (SD) and skewed error ratio (SER) of the average Dice values. Our results demonstrate that the racial bias results from the use of imbalanced training data, and that all proposed bias mitigation strategies improved fairness, with the best SD and SER resulting from the use of protected group models.},
urldate = {2023-12-13},
publisher = {arXiv},
author = {Puyol-Anton, Esther and Ruijsink, Bram and Piechnik, Stefan K. and Neubauer, Stefan and Petersen, Steffen E. and Razavi, Reza and King, Andrew P.},
month = jul,
year = {2021},
note = {arXiv:2106.12387 [cs]},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computer Vision and Pattern Recognition},
annote = {Comment: MICCAI 2021 conference},
}
@article{pierson_algorithmic_2021,
title = {An algorithmic approach to reducing unexplained pain disparities in underserved populations},
volume = {27},
copyright = {2021 The Author(s), under exclusive licence to Springer Nature America, Inc.},
issn = {1546-170X},
url = {https://www.nature.com/articles/s41591-020-01192-7},
doi = {10.1038/s41591-020-01192-7},
abstract = {Underserved populations experience higher levels of pain. These disparities persist even after controlling for the objective severity of diseases like osteoarthritis, as graded by human physicians using medical images, raising the possibility that underserved patients’ pain stems from factors external to the knee, such as stress. Here we use a deep learning approach to measure the severity of osteoarthritis, by using knee X-rays to predict patients’ experienced pain. We show that this approach dramatically reduces unexplained racial disparities in pain. Relative to standard measures of severity graded by radiologists, which accounted for only 9\% (95\% confidence interval (CI), 3–16\%) of racial disparities in pain, algorithmic predictions accounted for 43\% of disparities, or 4.7× more (95\% CI, 3.2–11.8×), with similar results for lower-income and less-educated patients. This suggests that much of underserved patients’ pain stems from factors within the knee not reflected in standard radiographic measures of severity. We show that the algorithm’s ability to reduce unexplained disparities is rooted in the racial and socioeconomic diversity of the training set. Because algorithmic severity measures better capture underserved patients’ pain, and severity measures influence treatment decisions, algorithmic predictions could potentially redress disparities in access to treatments like arthroplasty.},
language = {en},
number = {1},
urldate = {2023-12-13},
journal = {Nature Medicine},
author = {Pierson, Emma and Cutler, David M. and Leskovec, Jure and Mullainathan, Sendhil and Obermeyer, Ziad},
month = jan,
year = {2021},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {Machine learning, Social sciences},
pages = {136--140},
}
@misc{Arnold_23,
title = {How {Biased} {Data} and {Algorithms} {Can} {Harm} {Health}},
journal = {Hopkins Bloomberg Public Health Magazine},
url = {https://magazine.jhsph.edu/2022/how-biased-data-and-algorithms-can-harm-health},
author = {Arnold, Carrie},
abstract = {Public health researchers are working to uncover and correct unfairness in AI.},
language = {en},
urldate = {2023-12-13},
year= {2022}
}
@misc{Cote2022,
title = {7 DATA COLLECTION METHODS IN BUSINESS ANALYTICS},
url = {https://online.hbs.edu/blog/post/data-collection-methods},
author = {Cote, Catherine},
language = {en},
urldate = {2023-12-13},
year= {2022}
}
@misc{Walsh2023,
title = {The legal issues presented by generative AI},
url = {https://mitsloan.mit.edu/ideas-made-to-matter/legal-issues-presented-generative-ai},
author = {Walsh, Dylan},
language = {en},
urldate = {2023-12-13},
year= {2023}
}
@misc{nikulski_toxicity_2021,
title = {Toxicity in {AI} {Text} {Generation}},
url = {https://towardsdatascience.com/toxicity-in-ai-text-generation-9e9d9646e68f},
abstract = {This article provides an overview of toxic language generation, what toxicity in text generation means, why it occurs, and how it is currently being addressed.},
language = {en},
urldate = {2023-12-13},
journal = {Medium},
author = {Nikulski, Julia},
month = sep,
year = {2021},
}
@article{paul_safeguards_2020,
title = {Safeguards for the use of artificial intelligence and machine learning in global health},
volume = {98},
issn = {0042-9686},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7133486/},
doi = {10.2471/BLT.19.237099},
number = {4},
urldate = {2023-12-14},
journal = {Bulletin of the World Health Organization},
author = {Paul, Amy K and Schaefer, Merrick},
month = apr,
year = {2020},
pmid = {32284653},
pmcid = {PMC7133486},
pages = {282--284},
}
@misc{pearce_beware_2021,
title = {Beware the {Privacy} {Violations} in {Artificial} {Intelligence} {Applications}},
url = {https://www.isaca.org/resources/news-and-trends/isaca-now-blog/2021/beware-the-privacy-violations-in-artificial-intelligence-applications},
abstract = {It has been proposed that, “Privacy matters to the electorate, and smart business looks at how to use data to find out information while remaining in compliance with regulatory rules.” Since “smart business” also consists of “the electorate” as employees, at least one...},
urldate = {2023-12-14},
journal = {ISACA},
author = {Pearce, Guy},
year = {2021}
}
@misc{nigro_ai_nodate,
title = {{AI} security risks: {Separating} hype from reality},
journal = {Security Magazine},
shorttitle = {{AI} security risks},
url = {https://www.securitymagazine.com/articles/100219-ai-security-risks-separating-hype-from-reality},
abstract = {By investing in artificial intelligence training and the necessary tools, security professionals can harness the power of AI to enhance their capabilities.},
language = {en},
urldate = {2023-12-14},
year = {2023},
author = {Nigro, Pam},
}
@misc{CDC2023,
title = {Melanoma of the Skin Statistics},
url = {https://www.cdc.gov/cancer/skin/statistics/index.htm},
author = {CDC},
language = {en},
urldate = {2023-12-14},
year= {2023}
}
@article{Melarkode2023,
title={AI-Powered Diagnosis of Skin Cancer: A Contemporary Review, Open Challenges and Future Research Directions},
author={Melarkode, Navneet and Srinivasan, Kathiravan and Qaisar, Saeed Mian and Plawiak, Pawel},
journal={Cancers},
volume={15},
number={4},
pages={1183},
year={2023},
publisher={MDPI}
}
@article{lohr_what_2021,
title = {What {Ever} {Happened} to {IBM}’s {Watson}?},
journal = {The New York Times},
url = {https://www.nytimes.com/2021/07/16/technology/what-happened-ibm-watson.html},
urldate = {2023-12-15},
author = {Lohr, Steve},
year = {2021},
}
@inproceedings{bender_dangers_2021,
address = {New York, NY, USA},
series = {{FAccT} '21},
title = {On the {Dangers} of {Stochastic} {Parrots}: {Can} {Language} {Models} {Be} {Too} {Big}? },
isbn = {978-1-4503-8309-7},
shorttitle = {On the {Dangers} of {Stochastic} {Parrots}},
url = {https://dl.acm.org/doi/10.1145/3442188.3445922},
doi = {10.1145/3442188.3445922},
abstract = {The past 3 years of work in NLP have been characterized by the development and deployment of ever larger language models, especially for English. BERT, its variants, GPT-2/3, and others, most recently Switch-C, have pushed the boundaries of the possible both through architectural innovations and through sheer size. Using these pretrained models and the methodology of fine-tuning them for specific tasks, researchers have extended the state of the art on a wide array of tasks as measured by leaderboards on specific benchmarks for English. In this paper, we take a step back and ask: How big is too big? What are the possible risks associated with this technology and what paths are available for mitigating those risks? We provide recommendations including weighing the environmental and financial costs first, investing resources into curating and carefully documenting datasets rather than ingesting everything on the web, carrying out pre-development exercises evaluating how the planned approach fits into research and development goals and supports stakeholder values, and encouraging research directions beyond ever larger language models.},
urldate = {2023-12-14},
booktitle = {Proceedings of the 2021 {ACM} {Conference} on {Fairness}, {Accountability}, and {Transparency}},
publisher = {Association for Computing Machinery},
author = {Bender, Emily M. and Gebru, Timnit and McMillan-Major, Angelina and Shmitchell, Shmargaret},
month = mar,
year = {2021},
pages = {610--623},
}
@article{sinz_engineering_2019,
title = {Engineering a {Less} {Artificial} {Intelligence}},
volume = {103},
issn = {08966273},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0896627319307408},
doi = {10.1016/j.neuron.2019.08.034},
language = {en},
number = {6},
urldate = {2023-12-15},
journal = {Neuron},
author = {Sinz, Fabian H. and Pitkow, Xaq and Reimer, Jacob and Bethge, Matthias and Tolias, Andreas S.},
month = sep,
year = {2019},
pages = {967--979},
}
@phdthesis{chaaya_privacy_2021,
type = {phdthesis},
title = {Privacy management in connected environments},
url = {https://theses.hal.science/tel-03446023},
abstract = {Recent years have witnessed rapid progress in enabling technologies for data sensing, communication and mining, paving the way for the phenomenal growth of smart connected environments (e.g., smart buildings, cities, factories). These environments are currently providing interesting and useful applications that help users in their everyday tasks (e.g. increasing comfort, reducing energy consumption). However, such applications require to collect, exchange, store, and process large amount of fine-granular data that is often privacy-sensitive for their users (e.g., location, energy-consumption), as its analysis allows data consumers to reveal sensitive information about them, such as their health conditions and preferences.Consequently, involving users in the management of their privacy is nowadays receiving extensive attention. Nonetheless, various improvements are still required. For instance, how to raise user awareness of the privacy risks involved in their data sharing and/or imposed by their environments. Moreover, how to enable users to assess their situations and make optimal data utility-privacy decisions accordingly.In this thesis, we focus on six main challenges: (1) representing diverse user contexts with a high semantic expressiveness power; (2) performing a holistic (all-data-inclusive) context-based privacy risk reasoning; (3) achieving user-centric privacy management; (4) making optimal context-based privacy decisions; (5) coping with the inter-context data dependency; and (6) delivering scalability and efficiency in order to assist the user in a variety of situations.To address these challenges, we first present an ontology-based data model capable of representing various user contexts with high-level information coverage. Following that, we introduce a context-aware semantic reasoning approach for privacy risk inference that provides a dynamic/contextual overview of risks tailored to the user's expertise. Then, to enable optimal management of data utility-privacy trade-offs, we propose a user-centric multi-objective approach for context-aware privacy management that provides dynamic best data protection strategies to be implemented based on user situations and preferences. Finally, we propose a new stochastic gradient descent solution for privacy-preserving during protection transitions, which gives an additional layer of protection against data inference attacks.The aforementioned contributions are regrouped in one global generic and extensible framework for context-aware privacy management.},
language = {en},
urldate = {2023-12-15},
school = {Université de Pau et des Pays de l'Adour},
author = {Chaaya, Karam Bou},
month = sep,
year = {2021},
}
@article{tucker_privacy_2018,
title = {Privacy, {Algorithms}, and {Artificial} {Intelligence}},
url = {https://ideas.repec.org//h/nbr/nberch/14011.html},
abstract = {No abstract is available for this item.},
language = {en},
urldate = {2023-12-15},
journal = {NBER Chapters},
author = {Tucker, Catherine},
year = {2018},
note = {Publisher: National Bureau of Economic Research, Inc},
pages = {423--437},
}
@misc{elefant_can_2023,
title = {Can {Lawyers} {Legally} and {Ethically} {Record} {Conversations} {With} {Clients} {Using} {Artificial} {Intelligence}?},
url = {https://myshingle.com/2023/07/articles/artificial-intelligence/can-lawyers-legally-and-ethically-record-conversations-with-clients-using-artificial-intelligence/},
abstract = {Abstract (prepared with ChatGPT 4): The rise of artificial intelligence (AI) and a surge in online meetings has made it increasingly common for attorneys to record and transcribe client conversatio…},
language = {en-US},
urldate = {2023-12-15},
journal = {My Shingle},
author = {Elefant, Carolyn},
month = jul,
year = {2023},
}
@article{andreotta_ai_2022,
title = {{AI}, big data, and the future of consent},
volume = {37},
issn = {1435-5655},
url = {https://doi.org/10.1007/s00146-021-01262-5},
doi = {10.1007/s00146-021-01262-5},
abstract = {In this paper, we discuss several problems with current Big data practices which, we claim, seriously erode the role of informed consent as it pertains to the use of personal information. To illustrate these problems, we consider how the notion of informed consent has been understood and operationalised in the ethical regulation of biomedical research (and medical practices, more broadly) and compare this with current Big data practices. We do so by first discussing three types of problems that can impede informed consent with respect to Big data use. First, we discuss the transparency (or explanation) problem. Second, we discuss the re-repurposed data problem. Third, we discuss the meaningful alternatives problem. In the final section of the paper, we suggest some solutions to these problems. In particular, we propose that the use of personal data for commercial and administrative objectives could be subject to a ‘soft governance’ ethical regulation, akin to the way that all projects involving human participants (e.g., social science projects, human medical data and tissue use) are regulated in Australia through the Human Research Ethics Committees (HRECs). We also consider alternatives to the standard consent forms, and privacy policies, that could make use of some of the latest research focussed on the usability of pictorial legal contracts.},
language = {en},
number = {4},
urldate = {2023-12-15},
journal = {AI \& SOCIETY},
author = {Andreotta, Adam J. and Kirkham, Nin and Rizzi, Marco},
month = dec,
year = {2022},
keywords = {AI, Big data, Informed consent, Moral responsibility, Privacy},
pages = {1715--1728},
}
@misc{Leek2017,
title = {Demystifying Artificial Intelligence},
url = {https://leanpub.com/demystifyai},
author = {Leek, Jeffrey T and Narayanan, Divya},
language = {en},
year= {2017}
}
@misc{ibm2023,
title = {AI vs. Machine Learning vs. Deep Learning vs. Neural Networks: What’s the difference?},
url = {https://www.ibm.com/blog/ai-vs-machine-learning-vs-deep-learning-vs-neural-networks/},
language = {en-US},
urldate = {2023-12-15},
author = {IBM Data \& AI Team},
year = {2023}
}
@misc{wikiNLP,
title = {Natural language processing},
url = {https://en.wikipedia.org/wiki/Natural_language_processing#History},
language = {en-US},
urldate = {2023-12-15},
author = {Wikipedia},
year = {2023}
}
@misc{gangarapu_ethics_2022,
title = {Ethics of {Facial} {Recognition}: {Key} {Issues} and {Solutions}},
shorttitle = {Ethics of {Facial} {Recognition}},
url = {https://learn.g2.com/ethics-of-facial-recognition},
abstract = {Facial recognition is one of the most advanced forms of biometric security facing ethical issues. Learn more about these issues and ways to mitigate them.},
language = {en},
urldate = {2023-12-15},
author = {Gangarapu, Katam Raju},
year = {2022},
}
@article{van_noorden_ethical_2020,
title = {The ethical questions that haunt facial-recognition research},
volume = {587},
copyright = {2021 Nature},
url = {https://www.nature.com/articles/d41586-020-03187-3},
doi = {10.1038/d41586-020-03187-3},
abstract = {Journals and researchers are under fire for controversial studies using this technology. And a Nature survey reveals that many researchers in this field think there is a problem.},
language = {en},
number = {7834},
urldate = {2023-12-15},
journal = {Nature},
author = {Van Noorden, Richard},
month = nov,
year = {2020},
note = {Number: 7834
Publisher: Nature Publishing Group},
keywords = {Computer science, Ethics, Machine learning, Politics},
pages = {354--358}
}
@misc{hao_deleting_2021,
title = {Deleting unethical data sets isn’t good enough},
url = {https://www.technologyreview.com/2021/08/13/1031836/ai-ethics-responsible-data-stewardship/},
abstract = {The AI research community has tried to scrub away its past. But the internet is forever.},
language = {en},
urldate = {2023-12-15},
journal = {MIT Technology Review},
author = {Hao, Karen},
year = {2021},
}
@misc{Gates_principles,
title = {The first principles guiding our work with {AI}},
url = {https://www.gatesfoundation.org/ideas/articles/artificial-intelligence-ai-development-principles},
abstract = {Gates Foundation CEO Mark Suzman shares the first principles on artificial intelligence (AI) that will be used to guide the foundation’s use of AI.},
language = {en},
urldate = {2023-12-15},
journal = {Bill \& Melinda Gates Foundation},
}
@article{fogel2022,
title={Defining artificial intelligence},
author={Fogel, David B},
journal={Machine Learning and the City: Applications in Architecture and Urban Design},
pages={91--120},
year={2022},
publisher={Wiley Online Library}
}
@misc{wikipedia_training_2023,
title = {Training, validation, and test data sets},
copyright = {Creative Commons Attribution-ShareAlike License},
url = {https://en.wikipedia.org/w/index.php?title=Training,_validation,_and_test_data_sets&oldid=1188399008},
abstract = {In machine learning, a common task is the study and construction of algorithms that can learn from and make predictions on data. Such algorithms function by making data-driven predictions or decisions, through building a mathematical model from input data. These input data used to build the model are usually divided into multiple data sets. In particular, three data sets are commonly used in different stages of the creation of the model: training, validation, and test sets.
The model is initially fit on a training data set, which is a set of examples used to fit the parameters (e.g. weights of connections between neurons in artificial neural networks) of the model. The model (e.g. a naive Bayes classifier) is trained on the training data set using a supervised learning method, for example using optimization methods such as gradient descent or stochastic gradient descent. In practice, the training data set often consists of pairs of an input vector (or scalar) and the corresponding output vector (or scalar), where the answer key is commonly denoted as the target (or label). The current model is run with the training data set and produces a result, which is then compared with the target, for each input vector in the training data set. Based on the result of the comparison and the specific learning algorithm being used, the parameters of the model are adjusted. The model fitting can include both variable selection and parameter estimation.
Successively, the fitted model is used to predict the responses for the observations in a second data set called the validation data set. The validation data set provides an unbiased evaluation of a model fit on the training data set while tuning the model's hyperparameters (e.g. the number of hidden units—layers and layer widths—in a neural network). Validation datasets can be used for regularization by early stopping (stopping training when the error on the validation data set increases, as this is a sign of over-fitting to the training data set).
This simple procedure is complicated in practice by the fact that the validation dataset's error may fluctuate during training, producing multiple local minima. This complication has led to the creation of many ad-hoc rules for deciding when over-fitting has truly begun.Finally, the test data set is a data set used to provide an unbiased evaluation of a final model fit on the training data set. If the data in the test data set has never been used in training (for example in cross-validation), the test data set is also called a holdout data set. The term "validation set" is sometimes used instead of "test set" in some literature (e.g., if the original data set was partitioned into only two subsets, the test set might be referred to as the validation set).Deciding the sizes and strategies for data set division in training, test and validation sets is very dependent on the problem and data available.},
language = {en},
urldate = {2023-12-16},
journal = {Wikipedia},
month = dec,
year = {2023},
note = {Page Version ID: 1188399008},
}
@article{baker_algorithmic_2022,
title = {Algorithmic {Bias} in {Education}},
volume = {32},
issn = {1560-4292},
doi = {10.1007/s40593-021-00285-9},
abstract = {In this paper, we review algorithmic bias in education, discussing the causes of that bias and reviewing the empirical literature on the specific ways that algorithmic bias is known to have manifested in education. While other recent work has reviewed mathematical definitions of fairness and expanded algorithmic approaches to reducing bias, our review focuses instead on solidifying the current understanding of the concrete impacts of algorithmic bias in education--which groups are known to be impacted and which stages and agents in the development and deployment of educational algorithms are implicated. We discuss theoretical and formal perspectives on algorithmic bias, connect those perspectives to the machine learning pipeline, and review metrics for assessing bias. Next, we review the evidence around algorithmic bias in education, beginning with the most heavily-studied categories of race/ethnicity, gender, and nationality, and moving to the available evidence of bias for less-studied categories, such as socioeconomic status, disability, and military-connected status. Acknowledging the gaps in what has been studied, we propose a framework for moving from unknown bias to known bias and from fairness to equity. We discuss obstacles to addressing these challenges and propose four areas of effort for mitigating and resolving the problems of algorithmic bias in AIED systems and other educational technology.},
language = {en},
number = {4},
urldate = {2023-12-17},
journal = {International Journal of Artificial Intelligence in Education},
author = {Baker, Ryan S. and Hawn, Aaron},
month = dec,
year = {2022},
note = {Publisher: Springer
ERIC Number: EJ1353563},
keywords = {Artificial Intelligence, Bias, Disabilities, Education, Mathematics, Military Personnel, Race, Sex, Socioeconomic Status, Veterans},
pages = {1052--1092},
}
@article{huang_evaluation_2022,
title = {Evaluation and {Mitigation} of {Racial} {Bias} in {Clinical} {Machine} {Learning} {Models}: {Scoping} {Review}},
volume = {10},
shorttitle = {Evaluation and {Mitigation} of {Racial} {Bias} in {Clinical} {Machine} {Learning} {Models}},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9198828/},
doi = {10.2196/36388},
abstract = {Racial bias is a key concern regarding the development, validation, and implementation of machine learning (ML) models in clinical settings. Despite the potential of bias to propagate health disparities, racial bias in clinical ML has yet to be thoroughly ...},
language = {en},
number = {5},
urldate = {2023-12-17},
journal = {JMIR Medical Informatics},
author = {Huang, Jonathan and Galal, Galal and Etemadi, Mozziyar and Vaidyanathan, Mahesh},
month = may,
year = {2022},
pmid = {35639450},
note = {Publisher: JMIR Publications Inc.},
}
@misc{wikiECHO,
title = {Amazon Echo},
url = {https://en.wikipedia.org/wiki/Amazon_Echo},
language = {en-US},
urldate = {2023-12-17},
author = {Wikipedia},
year = {2023}
}
@misc{bansal_ubers_2022,
title = {Uber’s facial recognition is locking {Indian} drivers out of their accounts},
url = {https://www.technologyreview.com/2022/12/06/1064287/ubers-facial-recognition-is-locking-indian-drivers-out-of-their-accounts/},
abstract = {Some people are finding their accounts permanently blocked},
language = {en},
urldate = {2023-12-18},
journal = {MIT Technology Review},
author = {Bansal, Varsha},
year = {2022}
}
@article{brittain_more_2023,
chapter = {Technology},
title = {More writers sue {OpenAI} for copyright infringement over {AI} training},
url = {https://www.reuters.com/technology/more-writers-sue-openai-copyright-infringement-over-ai-training-2023-09-11/},
abstract = {A group of U.S. authors, including Pulitzer Prize winner Michael Chabon, has sued OpenAI in federal court in San Francisco, accusing the Microsoft-backed program of misusing their writing to train its popular artificial intelligence-powered chatbot ChatGPT.},
language = {en},
urldate = {2023-12-19},
journal = {Reuters},
author = {Brittain, Blake},
month = sep,
year = {2023},
}
@article{Turner2016,
title = {Google Translate is getting really, really accurate},
url = {https://www.washingtonpost.com/news/innovations/wp/2016/10/03/google-translate-is-getting-really-really-accurate/},
language = {en-US},
urldate = {2023-12-19},
author = {Turner, Karen},
journal = {The Washington Post},
year = {2016}
}
@misc{Gu2023,
title = {New features make Translate more accessible for its 1 billion users},
url = {https://blog.google/products/translate/new-features-make-translate-more-accessible-for-its-1-billion-users/},
language = {en-US},
urldate = {2023-12-19},
author = {Gu, Xinxing},
year = {2023}
}
@article{turchin2021,
title={Using natural language processing to measure and improve quality of diabetes care: a systematic review},
author={Turchin, Alexander and Florez Builes, Luisa F},
journal={Journal of Diabetes Science and Technology},
volume={15},
number={3},
pages={553--560},
year={2021},
publisher={SAGE Publications Sage CA: Los Angeles, CA}
}
@misc{odsc2023,
title = {Origins of Generative AI and Natural Language Processing with ChatGPT},
url = {https://opendatascience.com/origins-of-generative-ai-and-natural-language-processing-with-chatgpt/},
language = {en-US},
urldate = {2023-12-19},
author = {ODSC},
year = {2023}
}
@article{vaswani2017,
title={Attention is all you need},
author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
journal={Advances in neural information processing systems},
volume={30},
year={2017}
}
@misc{tay2022efficient,
title={Efficient Transformers: A Survey},
author={Yi Tay and Mostafa Dehghani and Dara Bahri and Donald Metzler},
year={2022},
eprint={2009.06732},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
@article{kasneci2023,
title = {ChatGPT for good? On opportunities and challenges of large language models for education},
journal = {Learning and Individual Differences},
volume = {103},
pages = {102274},
year = {2023},
issn = {1041-6080},
doi = {10.1016/j.lindif.2023.102274},
url = {https://www.sciencedirect.com/science/article/pii/S1041608023000195},
author = {Enkelejda Kasneci and Kathrin Sessler and Stefan Küchemann and Maria Bannert and Daryna Dementieva and Frank Fischer and Urs Gasser and Georg Groh and Stephan Günnemann and Eyke Hüllermeier and Stephan Krusche and Gitta Kutyniok and Tilman Michaeli and Claudia Nerdel and Jürgen Pfeffer and Oleksandra Poquet and Michael Sailer and Albrecht Schmidt and Tina Seidel and Matthias Stadler and Jochen Weller and Jochen Kuhn and Gjergji Kasneci},
}
@article{yang2023,
author = {Yang, Ling and Zhang, Zhilong and Song, Yang and Hong, Shenda and Xu, Runsheng and Zhao, Yue and Zhang, Wentao and Cui, Bin and Yang, Ming-Hsuan},
title = {Diffusion Models: A Comprehensive Survey of Methods and Applications},
year = {2023},
issue_date = {April 2024},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {56},
number = {4},
issn = {0360-0300},
url = {https://doi.org/10.1145/3626235},
doi = {10.1145/3626235},
abstract = {Diffusion models have emerged as a powerful new family of deep generative models with record-breaking performance in many applications, including image synthesis, video generation, and molecule design. In this survey, we provide an overview of the rapidly expanding body of work on diffusion models, categorizing the research into three key areas: efficient sampling, improved likelihood estimation, and handling data with special structures. We also discuss the potential for combining diffusion models with other generative models for enhanced results. We further review the wide-ranging applications of diffusion models in fields spanning from computer vision, natural language processing, temporal data modeling, to interdisciplinary applications in other scientific disciplines. This survey aims to provide a contextualized, in-depth look at the state of diffusion models, identifying the key areas of focus and pointing to potential areas for further exploration. Github:},
journal = {ACM Comput. Surv.},
month = {nov},
articleno = {105},
numpages = {39},
keywords = {diffusion models, stochastic differential equations, score-based generative models, Generative models}
}
@article{kingma2013,
title={Auto-encoding variational {Bayes}},
author={Kingma, Diederik P and Welling, Max},
journal={arXiv preprint arXiv:1312.6114},
year={2013}
}
@inproceedings{karras2020,
title={Analyzing and improving the image quality of {StyleGAN}},
author={Karras, Tero and Laine, Samuli and Aittala, Miika and Hellsten, Janne and Lehtinen, Jaakko and Aila, Timo},
booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
pages={8110--8119},
year={2020}
}
@misc{zoom2023,
url={https://www.zoom.com/en/ai-assistant/},
title={Zoom’s smart AI assistant empowers you},
author={Zoom},
year={2023},
language = {en-US},
urldate = {2023-12-20},
}
@article{marino2023,
title={D2H2: diabetes data and hypothesis hub},
author={Marino, Giacomo B and Ahmed, Nasheath and Xie, Zhuorui and Jagodnik, Kathleen M and Han, Jason and Clarke, Daniel JB and Lachmann, Alexander and Keller, Mark P and Attie, Alan D and Ma’ayan, Avi},
journal={Bioinformatics Advances},
volume={3},
number={1},
year={2023},
publisher={Oxford University Press}
}
@article{habib2023,
author = {Habib, Anand R. and Gross, Cary P.},
title = "{FDA Regulations of AI-Driven Clinical Decision Support Devices Fall Short}",
journal = {JAMA Internal Medicine},
volume = {183},
number = {12},
pages = {1401--1402},
year = {2023},
month = {12},
abstract = "{We are entering a new era of computerized clinical decision support (CDS) tools. Companies are increasingly using artificial intelligence and/or machine learning (AI/ML) to develop new CDS devices, which are defined by the US Food and Drug Administration (FDA) as software used in disease prevention, diagnosis, or treatment. Recognizing the potential implications for clinical practice, the 21st Century Cures Act enjoined the FDA to regulate these new devices.In their case series reported in this issue of JAMA Internal Medicine, Lee and colleagues analyzed the evidence supporting FDA approval of 10 AI/ML CDS devices intended for use in critical care. Their findings are worrisome. Only 2 device authorizations cited peer-reviewed publications, and only 1 outlined a detailed safety risk assessment. No company provided software code to enable independent validation, evaluated clinical efficacy, or assessed whether the use of algorithms exacerbates health disparities.}",
issn = {2168-6106},
doi = {10.1001/jamainternmed.2023.5006},
url = {https://doi.org/10.1001/jamainternmed.2023.5006},
eprint = {https://jamanetwork.com/journals/jamainternalmedicine/articlepdf/2810620/jamainternal\_habib\_2023\_er\_230003\_1701463607.95585.pdf},
}
@misc{healthcareradio2023,
title={AMA issues new principles for AI development, deployment \& use},
url={https://www.healthcarenowradio.com/ama-issues-new-principles-for-ai-development-deployment-use/#:~:text=Key%20concepts%20outlined%20by%20the,governance%20of%20health%20care%20AI.},
journal={HealthcareNOWradio.com},
author={News, Industry},
year={2023},
month={Dec}
}
@misc{ama2023,
title={AMA issues new principles for AI development, deployment \& use},
url={https://www.ama-assn.org/press-center/press-releases/ama-issues-new-principles-ai-development-deployment-use},
journal={American Medical Association},
author={American Medical Association},
year={2023},
month=nov,
language={en}
}
@article{nigam2023,
author = {Shah, Nigam H. and Entwistle, David and Pfeffer, Michael A.},
title = "{Creation and Adoption of Large Language Models in Medicine}",
journal = {JAMA},
volume = {330},
number = {9},
pages = {866--869},
year = {2023},
month = {09},
abstract = "{There is increased interest in and potential benefits from using large language models (LLMs) in medicine. However, by simply wondering how the LLMs and the applications powered by them will reshape medicine instead of getting actively involved, the agency in shaping how these tools can be used in medicine is lost.Applications powered by LLMs are increasingly used to perform medical tasks without the underlying language model being trained on medical records and without verifying their purported benefit in performing those tasks.The creation and use of LLMs in medicine need to be actively shaped by provisioning relevant training data, specifying the desired benefits, and evaluating the benefits via testing in real-world deployments.}",
issn = {0098-7484},
doi = {10.1001/jama.2023.14217},
url = {https://doi.org/10.1001/jama.2023.14217},
eprint = {https://jamanetwork.com/journals/jama/articlepdf/2808296/jama\_shah\_2023\_sc\_230004\_1693922864.71803.pdf},
}
@article{schulman2023,
author = {Schulman, Kevin A. and Nielsen, Jr., Perry Kent and Patel, Kavita},
title = "{AI Alone Will Not Reduce the Administrative Burden of Health Care}",
journal = {JAMA},
volume = {330},
number = {22},
pages = {2159-2160},
year = {2023},
month = {12},
abstract = "{Large language models (LLMs) are some of the most exciting innovations to come from artificial intelligence research. The capacity of this technology is astonishing, and there are multiple different use cases being proposed where LLMs can solve pain points for physicians—everything from assistance with patient portal messages to clinical decision support for chronic care management to compiling clinical summaries. Another often discussed opportunity is to reduce administrative costs such as billing and insurance-related costs in health care. However, before jumping into technology as a solution, considering why the billing process is so challenging in the first place may be a better approach. After all, the prerequisite for a successful LLM application is the presence of “useful patterns” in the data.}",
issn = {0098-7484},
doi = {10.1001/jama.2023.23809},
url = {https://doi.org/10.1001/jama.2023.23809},
eprint = {https://jamanetwork.com/journals/jama/articlepdf/2812255/jama\_schulman\_2023\_vp\_230150\_1701364722.65094.pdf},
}
@article{bellaiche_humans_2023,
title = {Humans versus {AI}: whether and why we prefer human-created compared to {AI}-created artwork},
volume = {8},
issn = {2365-7464},
shorttitle = {Humans versus {AI}},
url = {https://doi.org/10.1186/s41235-023-00499-6},
doi = {10.1186/s41235-023-00499-6},
abstract = {With the recent proliferation of advanced artificial intelligence (AI) models capable of mimicking human artworks, AI creations might soon replace products of human creativity, although skeptics argue that this outcome is unlikely. One possible reason this may be unlikely is that, independent of the physical properties of art, we place great value on the imbuement of the human experience in art. An interesting question, then, is whether and why people might prefer human-compared to AI-created artworks. To explore these questions, we manipulated the purported creator of pieces of art by randomly assigning a “Human-created” or “AI-created” label to paintings actually created by AI, and then assessed participants’ judgements of the artworks across four rating criteria (Liking, Beauty, Profundity, and Worth). Study 1 found increased positive judgements for human- compared to AI-labelled art across all criteria. Study 2 aimed to replicate and extend Study 1 with additional ratings (Emotion, Story, Meaningful, Effort, and Time to create) intended to elucidate why people more-positively appraise Human-labelled artworks. The main findings from Study 1 were replicated, with narrativity (Story) and perceived effort behind artworks (Effort) moderating the label effects (“Human-created” vs. “AI-created”), but only for the sensory-level judgements (Liking, Beauty). Positive personal attitudes toward AI moderated label effects for more-communicative judgements (Profundity, Worth). These studies demonstrate that people tend to be negatively biased against AI-created artworks relative to purportedly human-created artwork, and suggest that knowledge of human engagement in the artistic process contributes positively to appraisals of art.},
number = {1},
urldate = {2023-12-30},
journal = {Cognitive Research: Principles and Implications},
author = {Bellaiche, Lucas and Shahi, Rohin and Turpin, Martin Harry and Ragnhildstveit, Anya and Sprockett, Shawn and Barr, Nathaniel and Christensen, Alexander and Seli, Paul},
month = jul,
year = {2023},
keywords = {Aesthetics, Artificial intelligence, Creativity, Judgements, Visual art},
pages = {42},
}
@article{granulo_preference_2021,
title = {Preference for {Human} (vs. {Robotic}) {Labor} is {Stronger} in {Symbolic} {Consumption} {Contexts}},
volume = {31},
copyright = {© 2020 The Authors. Journal of Consumer Psychology published by Wiley Periodicals LLC on behalf of Society for Consumer Psychology},
issn = {1532-7663},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/jcpy.1181},
doi = {10.1002/jcpy.1181},
abstract = {Advances in robotics, automation, and artificial intelligence increasingly enable firms to replace human labor with technology, thereby fundamentally transforming how goods and services are produced. From both managerial and societal points of view, it is therefore important to understand demand-side incentives for firms to employ human labor. We begin to address this question by examining for which products and services consumers are more likely to favor human (vs. robotic) labor. In six studies, we demonstrate that consumers prefer human (vs. robotic) labor more for products with higher (vs. lower) symbolic value (e.g., when expressing something about one's beliefs and personality is of greater importance). We theorize that this is because consumers have stronger uniqueness motives in more (vs. less) symbolic consumption contexts (and associate human labor more strongly with product uniqueness). In line with this account, we demonstrate that individual differences in need for uniqueness moderate the interaction between production mode and symbolic motives and that a measure of uniqueness motives mediates the effect of consumption context on preferences for human (vs. robotic) production.},
language = {en},
number = {1},
urldate = {2023-12-30},
journal = {Journal of Consumer Psychology},
author = {Granulo, Armin and Fuchs, Christoph and Puntoni, Stefano},
year = {2021},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/jcpy.1181},
keywords = {Consumer preferences, Human labor, Robotic labor, Symbolic consumption, Uniqueness motives},
pages = {72--80},
}
@incollection{latar_robot_2015,
title = {The {Robot} {Journalist} in the {Age} of {Social} {Physics}: {The} {End} of {Human} {Journalism}?},
isbn = {978-3-319-09008-5},
shorttitle = {The {Robot} {Journalist} in the {Age} of {Social} {Physics}},
abstract = {In the age of Big Data, extracting knowledge from unlimited data silos employing Artificial Intelligence algorithms is becoming fundamental for the survival of society. We are living in an age of exponential growth in the complexity of social systems. We are at the dawn of an emergence of a new science some term as “social physics” that will allow to automatically analyse the billions of micro social engagements done continuously through our mobile devices in all fields of human activity (similar to the study of atoms in physics). This analysis of the social dynamics will allow to identify new social trends, social theories, at the “budding” stage.},
author = {Latar, Noam},
month = jan,
year = {2015},
doi = {10.1007/978-3-319-09009-2_6},
pages = {65--80},
}
@article{selenko_artificial_2022,
title = {Artificial {Intelligence} and the {Future} of {Work}: {A} {Functional}-{Identity} {Perspective}},
volume = {31},
issn = {0963-7214},
shorttitle = {Artificial {Intelligence} and the {Future} of {Work}},
url = {https://doi.org/10.1177/09637214221091823},
doi = {10.1177/09637214221091823},
abstract = {The impact of the implementation of artificial intelligence (AI) on workers’ experiences remains underexamined. Although AI-enhanced processes can benefit workers (e.g., by assisting with exhausting or dangerous tasks), they can also elicit psychological harm (e.g., by causing job loss or degrading work quality). Given AI’s uniqueness among other technologies, resulting from its expanding capabilities and capacity for autonomous learning, we propose a functional-identity framework to examine AI’s effects on people’s work-related self-understandings and the social environment at work. We argue that the conditions for AI to either enhance or threaten workers’ sense of identity derived from their work depends on how the technology is functionally deployed (by complementing tasks, replacing tasks, and/or generating new tasks) and how it affects the social fabric of work. Also, how AI is implemented and the broader social-validation context play a role. We conclude by outlining future research directions and potential application of the proposed framework to organizational practice.},
language = {en},
number = {3},
urldate = {2023-12-30},
journal = {Current Directions in Psychological Science},
author = {Selenko, Eva and Bankins, Sarah and Shoss, Mindy and Warburton, Joel and Restubog, Simon Lloyd D.},
month = jun,
year = {2022},
note = {Publisher: SAGE Publications Inc},
pages = {272--279},
}
@inproceedings{jansen_climate_2023,
address = {Virtual},
title = {The {Climate} {Crisis} is a {Digital} {Rights} {Crisis}: {Exploring} the {Civil}-{Society} {Framing} of {Two} {Intersecting} {Disasters}},
shorttitle = {The {Climate} {Crisis} is a {Digital} {Rights} {Crisis}},
url = {https://limits.pubpub.org/pub/8544yai8},
doi = {10.21428/bf6fb269.b4704652},
abstract = {Digitization is often presented as one of the key technologies to avoid or mitigate the climate crisis. Yet, we see the same technologies being used to promote disinformation, facilitate unsustainable business models, and to surveil climate justice advocates and land defenders. We also see a growing digital divide, a disparity between communities that reap the benefits of modern internet infrastructures, and those communities whose land and labor are essential for the production of these infrastructures but who remain comparatively disconnected and are exposed to the negative material and social impacts of the technology. In this paper we report and discuss our findings from exploring the intersection of the climate crisis and digital rights in an interactive workshop with civil society representatives and policy makers at an important digital-rights event in Brussels in the European Union. We asked participants to collectively explore prevalent issues, framing(s), and governance initiatives at that nexus. We found that, while our workshop participants were predominantly concerned with material aspects of digital infrastructures, awareness of governance and business models as relevant factors for equitable and just digital infrastructures is present. These aspects need to make their way into upcoming legislation.},
language = {en},
urldate = {2024-01-01},
booktitle = {Ninth {Computing} within {Limits} 2023},
publisher = {LIMITS},
author = {Jansen, Fieke and Gulmez, Merve and Kazansky, Becky and Bakari, Narmine Abou and Fernandez, Claire and Kingaby, Harriet and Mühlberg, Jan Tobias},
month = jun,
year = {2023},
}
@misc{hulick_training_2021,
title = {Training {AI} to be really smart poses risks to climate},
url = {https://www.snexplores.org/article/training-ai-energy-emissions-climate-risk},
abstract = {As artificial intelligence models grow larger and consume more energy, experts have begun to worry about their impact on Earth’s climate.},
language = {en-US},
urldate = {2024-01-01},
author = {Hulick, Kathryn},
month = mar,
year = {2021},
note = {Section: Tech}
}