% main.bib -- bibliography database
% ------------------------------------------------------------------
% Venue-name string macros.  Two alternative sets follow: the first
% uses long abbreviated names, the second short acronyms.  BibTeX
% warns on every redefinition and the LAST definition wins, so the
% short acronyms below are the ones actually rendered; comment out
% one set to silence the warnings.
% ------------------------------------------------------------------
@String(PAMI = {IEEE Trans. Pattern Anal. Mach. Intell.})
@String(IJCV = {Int. J. Comput. Vis.})
@String(CVPR= {IEEE Conf. Comput. Vis. Pattern Recog.})
@String(ICCV= {Int. Conf. Comput. Vis.})
@String(ECCV= {Eur. Conf. Comput. Vis.})
@String(NIPS= {Adv. Neural Inform. Process. Syst.})
@String(ICPR = {Int. Conf. Pattern Recog.})
@String(BMVC= {Brit. Mach. Vis. Conf.})
@String(TOG= {ACM Trans. Graph.})
@String(TIP = {IEEE Trans. Image Process.})
@String(TVCG = {IEEE Trans. Vis. Comput. Graph.})
@String(TMM = {IEEE Trans. Multimedia})
@String(ACMMM= {ACM Int. Conf. Multimedia})
@String(ICME = {Int. Conf. Multimedia and Expo})
@String(ICASSP= {ICASSP})
@String(ICIP = {IEEE Int. Conf. Image Process.})
@String(ACCV = {ACCV})
@String(ICLR = {Int. Conf. Learn. Represent.})
@String(IJCAI = {IJCAI})
@String(PR = {Pattern Recognition})
@String(AAAI = {AAAI})
@String(CVPRW= {IEEE Conf. Comput. Vis. Pattern Recog. Worksh.})
@String(CSVT = {IEEE Trans. Circuit Syst. Video Technol.})
@String(SPL = {IEEE Sign. Process. Letters})
@String(VR = {Vis. Res.})
@String(JOV = {J. Vis.})
@String(TVC = {The Vis. Comput.})
@String(JCST = {J. Comput. Sci. Tech.})
@String(CGF = {Comput. Graph. Forum})
@String(CVM = {Computational Visual Media})
% Macros referenced by entries later in this file but previously
% never defined (they expanded to empty text with only a warning):
@String(ICML = {Int. Conf. Mach. Learn.})
@String(WACV = {IEEE Winter Conf. Appl. Comput. Vis.})
@String(WIFS = {IEEE Int. Worksh. Inform. Forensics and Security})
@String(MIPR = {IEEE Conf. Multimedia Inform. Process. and Retrieval})
% ---- short-acronym overrides (these take precedence) -------------
@String(PAMI = {IEEE TPAMI})
@String(IJCV = {IJCV})
@String(CVPR = {CVPR})
@String(ICCV = {ICCV})
@String(ECCV = {ECCV})
@String(NIPS = {NeurIPS})
@String(ICPR = {ICPR})
@String(BMVC = {BMVC})
@String(TOG = {ACM TOG})
@String(TIP = {IEEE TIP})
@String(TVCG = {IEEE TVCG})
@String(TCSVT = {IEEE TCSVT})
@String(TMM = {IEEE TMM})
@String(ACMMM = {ACM MM})
@String(ICME = {ICME})
@String(ICASSP= {ICASSP})
@String(ICIP = {ICIP})
@String(ACCV = {ACCV})
@String(ICLR = {ICLR})
@String(IJCAI = {IJCAI})
@String(PR = {PR})
@String(AAAI = {AAAI})
@String(CVPRW= {CVPRW})
@String(CSVT = {IEEE TCSVT})
@String(ICML = {ICML})
@String(WACV = {WACV})
@String(WIFS = {WIFS})
@String(MIPR = {MIPR})
% Fixed: entry carried both year and date (give one, not both);
% classic BibTeX ignores "date", so keep "year".
@software{ilharco_openclip_2021,
  title   = {OpenCLIP},
  author  = {Ilharco, Gabriel and Wortsman, Mitchell and Wightman, Ross and Gordon, Cade and Carlini, Nicholas and Taori, Rohan and Dave, Achal and Shankar, Vaishaal and Namkoong, Hongseok and Miller, John and Hajishirzi, Hannaneh and Farhadi, Ali and Schmidt, Ludwig},
  year    = {2021},
  doi     = {10.5281/zenodo.5143773},
  version = {v0.1},
}
@misc{vimz,
  author       = {Stefan Dziembowski and Shahriar Ebrahimi and Parisa Hassanizadeh},
  title        = {{VIMz}: Verifiable Image Manipulation using Folding-based {zkSNARKs}},
  howpublished = {Cryptology {ePrint} Archive, Paper 2024/1063},
  year         = {2024},
  url          = {https://eprint.iacr.org/2024/1063},
}
@misc{veritas,
  author       = {Trisha Datta and Binyi Chen and Dan Boneh},
  title        = {{VerITAS}: Verifying Image Transformations at Scale},
  howpublished = {Cryptology {ePrint} Archive, Paper 2024/1066},
  year         = {2024},
  url          = {https://eprint.iacr.org/2024/1066},
}
% Fixed: arXiv id was stuffed into journal/note; use the eprint
% fields, matching the arXiv entries elsewhere in this file.
@misc{perceptual_hash_security_2024,
  title         = {Assessing the Adversarial Security of Perceptual Hashing Algorithms},
  author        = {Madden, Jordan},
  year          = {2024},
  eprint        = {2406.00918},
  archivePrefix = {arXiv},
  url           = {https://arxiv.org/abs/2406.00918},
  abstract      = {This paper reveals that perceptual hashing algorithms exhibit vulnerabilities allowing original images to be reconstructed from hash bits, raising significant privacy concerns.},
}
@misc{iris-search,
  author       = {Remco Bloemen and Bryan Gillespie and Daniel Kales and Philipp Sippl and Roman Walch},
  title        = {Large-Scale {MPC}: Scaling Private Iris Code Uniqueness Checks to Millions of Users},
  howpublished = {Cryptology {ePrint} Archive, Paper 2024/705},
  year         = {2024},
  url          = {https://eprint.iacr.org/2024/705},
}
% Fixed: arXiv reference carried only a URL; added eprint fields for
% consistency with the other arXiv entries in this file.
@misc{wally-search,
  title         = {Scalable Private Search with Wally},
  author        = {Hilal Asi and Fabian Boemer and Nicholas Genise and Muhammad Haris Mughees and Tabitha Ogilvie and Rehan Rishi and Guy N. Rothblum and Kunal Talwar and Karl Tarbe and Ruiyu Zhu and Marco Zuliani},
  year          = {2024},
  eprint        = {2406.06761},
  archivePrefix = {arXiv},
  url           = {https://arxiv.org/abs/2406.06761},
}
% Fixed: page range used a single hyphen (typographical error).
@article{YoutubeCompression,
  author   = {Che, Xianhui and Ip, Barry and Lin, Ling},
  journal  = {IEEE MultiMedia},
  title    = {A Survey of Current YouTube Video Characteristics},
  year     = {2015},
  volume   = {22},
  number   = {2},
  pages    = {56--63},
  keywords = {YouTube;Streaming media;Electron tubes;Uniform resource locators;Entertainment industry;Blogs;Social network services;YouTube;online video;traffic analysis;multimedia;data analysis;network engineering;networking},
  doi      = {10.1109/MMUL.2015.34},
}
% Fixed: DOI stored with the resolver prefix; store the bare DOI and
% let the style build the link.
@article{phash2020,
  title    = {Perceptual hashing for image authentication: A survey},
  author   = {Ling Du and Anthony T.S. Ho and Runmin Cong},
  journal  = {Signal Processing: Image Communication},
  volume   = {81},
  pages    = {115713},
  year     = {2020},
  issn     = {0923-5965},
  doi      = {10.1016/j.image.2019.115713},
  url      = {https://www.sciencedirect.com/science/article/pii/S0923596519301286},
  keywords = {Tamper detection, Perceptual image hashing, Content authenticity analysis, Security},
  abstract = {Perceptual hashing is used for multimedia content identification and authentication through perception digests based on the understanding of multimedia content. This paper presents a literature review of image hashing for image authentication in the last decade. The objective of this paper is to provide a comprehensive survey and to highlight the pros and cons of existing state-of-the-art techniques. In this article, the general structure and classifications of image hashing based tamper detection techniques with their properties are exploited. Furthermore, the evaluation datasets and different performance metrics are also discussed. The paper concludes with recommendations and good practices drawn from the reviewed techniques.},
}
% Fixed: resolver prefix in DOI; single-hyphen page range.
@article{VANHOUTEN200948,
  title    = {Source video camera identification for multiply compressed videos originating from YouTube},
  author   = {Wiger {van Houten} and Zeno Geradts},
  journal  = {Digital Investigation},
  volume   = {6},
  number   = {1},
  pages    = {48--60},
  year     = {2009},
  issn     = {1742-2876},
  doi      = {10.1016/j.diin.2009.05.003},
  url      = {https://www.sciencedirect.com/science/article/pii/S1742287609000310},
  keywords = {Photo response non-uniformity, Video camera identification, Pattern noise, Digital forensics, YouTube, Webcam video, Mobile phone, MSN messenger, Windows live messenger},
  abstract = {The Photo Response Non-Uniformity is a unique sensor noise pattern that is present in each image or video acquired with a digital camera. In this work a wavelet-based technique used to extract these patterns from digital images is applied to compressed low resolution videos originating mainly from webcams. After recording these videos with a variety of codec and resolution settings, the videos were uploaded to YouTube, a popular internet video sharing website. By comparing the average pattern extracted from these resulting downloaded videos with the average pattern obtained from multiple reference cameras of the same brand and type, it was attempted to identify the source camera. This may be of interest in cases of child abuse or child pornography. Depending on the codec, quality settings and recording resolution, very satisfactory results were obtained.},
}
% Fixed: month given as a quoted word; use the predefined macro.
@article{laghari2018assessment,
  title     = {Assessment of quality of experience ({QoE}) of image compression in social cloud computing},
  author    = {Laghari, Asif Ali and He, Hui and Shafiq, Muhammad and Khan, Asiya},
  journal   = {Multiagent and Grid Systems},
  volume    = {14},
  number    = {2},
  pages     = {125--143},
  year      = {2018},
  month     = jun,
  publisher = {IOS Press},
  doi       = {10.3233/MGS-180284},
  note      = {Received: 13 September 2017; Accepted: 25 February 2018; Published: 26 June 2018},
}
% Fixed: month given as a quoted word; use the predefined macro.
@article{anwar2021image,
  title     = {Image Quality Analysis of PNG Images on WhatsApp Messenger Sending},
  author    = {Anwar, Fahmi and Fadlil, Abdul and Riadi, Imam},
  journal   = {Telematika},
  volume    = {14},
  number    = {1},
  pages     = {1--12},
  year      = {2021},
  month     = feb,
  publisher = {AMIKOM Purwokerto},
  issn      = {2442-4528},
  doi       = {10.35671/telematika.v14i1.1114},
  note      = {Accreditated SINTA "2" Kemenristek/BRIN, No. 85/M/KPT/2020},
}
% Fixed: conference paper was typed as an article with the conference
% name in the journal field; it belongs in booktitle of inproceedings.
@inproceedings{deep_watermark,
  title     = {HiDDeN: Hiding Data With Deep Networks},
  author    = {Zhu, Jiren and Kaplan, Russell and Johnson, Justin and Fei-Fei, Li},
  booktitle = {European Conference on Computer Vision},
  pages     = {682--697},
  year      = {2018},
}
% Fixed: booktitle named a venue that does not exist ("International
% Conference on Computer Vision and Pattern Recognition"); the 2023
% DINOv2 report is the arXiv preprint (arXiv:2304.07193).
% NOTE(review): confirm the intended venue before camera-ready.
@misc{oquab2023dinov2,
  title         = {DINOv2: Learning Robust Visual Features without Supervision},
  author        = {Oquab, Maxime and Darcet, Timoth{\'e}e and Moutakanni, Theo and Vo, Huy V and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and others},
  year          = {2023},
  eprint        = {2304.07193},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
}
@inproceedings{gentry2009fully,
  author    = {Gentry, Craig},
  title     = {Fully homomorphic encryption using ideal lattices},
  booktitle = {Proceedings of the forty-first annual ACM symposium on Theory of computing},
  pages     = {169--178},
  year      = {2009},
}
% Fixed: conference paper typed as an article with the conference
% name in the journal field.
@inproceedings{dijk2010fully,
  title     = {Fully homomorphic encryption over the integers},
  author    = {Van Dijk, Marten and Gentry, Craig and Halevi, Shai and Vaikuntanathan, Vinod},
  booktitle = {Annual International Conference on the Theory and Applications of Cryptographic Techniques},
  pages     = {24--43},
  year      = {2010},
  publisher = {Springer},
}
% Fixed: arXiv id was stuffed into booktitle of an inproceedings;
% use the eprint fields instead.
@misc{meta2023stablesig,
  title         = {Stable Signature: Structural Watermarking for Diffusion Models},
  author        = {Wang, Jie and Wang, Mingdeng and Yang, Zhen and Wang, Hao and Li, Yujun and Zhang, Lei and Qi, Gui-Song},
  year          = {2023},
  eprint        = {2312.12391},
  archivePrefix = {arXiv},
  internal-note = {NOTE(review): author list and arXiv identifier could not be verified against the published Stable Signature paper -- confirm before use},
}
@inproceedings{struppek2022learning,
  author    = {Struppek, Lukas and Hintersdorf, Dominik and Neider, Daniel and Kersting, Kristian},
  title     = {Learning to Break Deep Perceptual Hashing: The Use Case NeuralHash},
  booktitle = {Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {1795--1807},
  year      = {2022},
}
% Fixed: a technical specification is not a journal article; the
% initiative name was misfiled in the journal field.
@misc{c2pa2023,
  title        = {{C2PA} Technical Specification},
  author       = {{Coalition for Content Provenance and Authenticity}},
  howpublished = {\url{https://c2pa.org/specifications/}},
  year         = {2023},
}
% ------------------------------------------------------------------
% NOTE(review): the six entries below (Authors14 .. Alpher05) are the
% placeholder references shipped with the CVPR LaTeX template.  They
% should be removed before submission if nothing cites them.
% ------------------------------------------------------------------
@misc{Authors14,
 author = {FirstName LastName},
 title = {The frobnicatable foo filter},
 note = {Face and Gesture submission ID 324. Supplied as supplemental material {\tt fg324.pdf}},
 year = 2014
}
@misc{Authors14b,
 author = {FirstName LastName},
 title = {Frobnication tutorial},
 note = {Supplied as supplemental material {\tt tr.pdf}},
 year = 2014
}
@article{Alpher02,
 author = {FirstName Alpher},
 title = {Frobnication},
 journal = PAMI,
 volume = 12,
 number = 1,
 pages = {234--778},
 year = 2002
}
@article{Alpher03,
 author = {FirstName Alpher and FirstName Fotheringham-Smythe},
 title = {Frobnication revisited},
 journal = {Journal of Foo},
 volume = 13,
 number = 1,
 pages = {234--778},
 year = 2003
}
@article{Alpher04,
 author = {FirstName Alpher and FirstName Fotheringham-Smythe and FirstName Gamow},
 title = {Can a machine frobnicate?},
 journal = {Journal of Foo},
 volume = 14,
 number = 1,
 pages = {234--778},
 year = 2004
}
@inproceedings{Alpher05,
 author = {FirstName Alpher and FirstName Gamow},
 title = {Can a computer frobnicate?},
 booktitle = CVPR,
 pages = {234--778},
 year = 2005
}
% Fixed: "OpeAI" typo; also, standard styles ignore "publisher" on a
% misc entry, so the organization is recorded as a corporate author
% (double braces keep it a single indivisible name).
@misc{OpenAIPolicies,
  author       = {{OpenAI}},
  title        = {How we use your data},
  howpublished = {\url{https://platform.openai.com/docs/models/how-we-use-your-data}},
  year         = {2023},
  note         = {Accessed: 2024-10-08},
}
@misc{OpenAIHelp,
  author       = {{OpenAI}},
  title        = {Data usage for consumer services FAQ},
  howpublished = {\url{https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq}},
  year         = {2024},
  note         = {Accessed: 2024-10-26},
}
% NOTE(review): this entry duplicates the key "iris-search" above
% (same work: Cryptology ePrint 2024/705 / arXiv:2405.04463), and its
% author list is missing Bryan Gillespie.  Keep only one key; check
% which one the document cites before deleting the other.
@article{bloemen2024large,
  title={Large-Scale MPC: Scaling Private Iris Code Uniqueness Checks to Millions of Users},
  author={Bloemen, Remco and Kales, Daniel and Sippl, Philipp and Walch, Roman},
  journal={arXiv preprint arXiv:2405.04463},
  year={2024}
}
@inproceedings{edalatnejad2024janus,
  author       = {EdalatNejad, Kasra and Lueks, Wouter and Sukaitis, Justinas and Narbel, Vincent Graf and Marelli, Massimo and Troncoso, Carmela},
  title        = {Janus: Safe biometric deduplication for humanitarian aid distribution},
  booktitle    = {2024 IEEE Symposium on Security and Privacy (SP)},
  pages        = {655--672},
  year         = {2024},
  organization = {IEEE},
}
% Fixed: Google-Scholar export junk ($\{$...$\}$) in the title;
% plain protective braces are what was intended.
@inproceedings{ozdemir2022experimenting,
  title     = {Experimenting with collaborative {zk-SNARKs}: {Zero-Knowledge} proofs for distributed secrets},
  author    = {Ozdemir, Alex and Boneh, Dan},
  booktitle = {31st USENIX Security Symposium (USENIX Security 22)},
  pages     = {4291--4308},
  year      = {2022},
}
% Fixed: Google-Scholar export junk ($\{$...$\}$) in the title.
@inproceedings{garg2023zksaas,
  title     = {{zkSaaS}: {Zero-Knowledge} {SNARKs} as a Service},
  author    = {Garg, Sanjam and Goel, Aarushi and Jain, Abhishek and Policharla, Guru-Vamsi and Sekar, Sruthi},
  booktitle = {32nd USENIX Security Symposium (USENIX Security 23)},
  pages     = {4427--4444},
  year      = {2023},
}
@misc{Taceo,
  title        = {coCircom},
  publisher    = {TACEO},
  howpublished = {\url{https://github.com/TaceoLabs/co-snarks}},
  note         = {Accessed: 2024-10-26},
}
% Fixed: Unicode en-dash in the page range; use the ASCII "--"
% expected by classic BibTeX.
@article{Shamir,
  author     = {Shamir, Adi},
  title      = {How to share a secret},
  year       = {1979},
  issue_date = {Nov. 1979},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  volume     = {22},
  number     = {11},
  issn       = {0001-0782},
  url        = {https://doi.org/10.1145/359168.359176},
  doi        = {10.1145/359168.359176},
  abstract   = {In this paper we show how to divide data D into n pieces in such a way that D is easily reconstructable from any k pieces, but even complete knowledge of k - 1 pieces reveals absolutely no information about D. This technique enables the construction of robust key management schemes for cryptographic systems that can function securely and reliably even when misfortunes destroy half the pieces and security breaches expose all but one of the remaining pieces.},
  journal    = {Commun. ACM},
  month      = nov,
  pages      = {612--613},
  numpages   = {2},
  keywords   = {cryptography, interpolation, key management},
}
@inproceedings{benhamouda2021generalized,
  author       = {Benhamouda, Fabrice and Boyle, Elette and Gilboa, Niv and Halevi, Shai and Ishai, Yuval and Nof, Ariel},
  title        = {Generalized pseudorandom secret sharing and efficient straggler-resilient secure computation},
  booktitle    = {Theory of Cryptography: 19th International Conference, TCC 2021, Raleigh, NC, USA, November 8--11, 2021, Proceedings, Part II 19},
  pages        = {129--161},
  year         = {2021},
  organization = {Springer},
}
@inproceedings{baum2014publicly,
  author       = {Baum, Carsten and Damg{\aa}rd, Ivan and Orlandi, Claudio},
  title        = {Publicly auditable secure multi-party computation},
  booktitle    = {Security and Cryptography for Networks: 9th International Conference, SCN 2014, Amalfi, Italy, September 3-5, 2014. Proceedings 9},
  pages        = {175--196},
  year         = {2014},
  organization = {Springer},
}
% Fixed: raw Unicode accent in an otherwise escape-based file; use
% the BibTeX special-character form so sorting/labels stay correct.
@misc{ensemble,
  title         = {Ensemble Adversarial Training: Attacks and Defenses},
  author        = {Florian Tram{\`e}r and Alexey Kurakin and Nicolas Papernot and Ian Goodfellow and Dan Boneh and Patrick McDaniel},
  year          = {2017},
  eprint        = {1705.07204},
  archivePrefix = {arXiv},
  primaryClass  = {stat.ML},
}
@misc{adversariallogitpairing,
  title         = {Adversarial Logit Pairing},
  author        = {Harini Kannan and Alexey Kurakin and Ian Goodfellow},
  year          = {2018},
  eprint        = {1803.06373},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  url           = {https://arxiv.org/abs/1803.06373},
}
@misc{pgddefense,
  title         = {Towards Deep Learning Models Resistant to Adversarial Attacks},
  author        = {Aleksander Madry and Aleksandar Makelov and Ludwig Schmidt and Dimitris Tsipras and Adrian Vladu},
  year          = {2019},
  eprint        = {1706.06083},
  archivePrefix = {arXiv},
  primaryClass  = {stat.ML},
  url           = {https://arxiv.org/abs/1706.06083},
}
% Fixed: empty volume/number fields (BibTeX "empty field" warnings)
% and single-hyphen page range.
@inproceedings{bart,
  author    = {Raff, Edward and Sylvester, Jared and Forsyth, Steven and McLean, Mark},
  booktitle = {2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  title     = {Barrage of Random Transforms for Adversarially Robust Defense},
  year      = {2019},
  pages     = {6521--6530},
  keywords  = {Computer vision;Perturbation methods;Transforms;Pattern recognition;Recognition: Detection;Categorization;Retrieval;Big Data;Large Scale Methods;Deep Learning},
  doi       = {10.1109/CVPR.2019.00669},
}
@misc{randomizedsmoothing,
  title         = {Certified Adversarial Robustness via Randomized Smoothing},
  author        = {Jeremy M Cohen and Elan Rosenfeld and J. Zico Kolter},
  year          = {2019},
  eprint        = {1902.02918},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  url           = {https://arxiv.org/abs/1902.02918},
}
@misc{defensegan,
  title         = {Defense-GAN: Protecting Classifiers Against Adversarial Attacks Using Generative Models},
  author        = {Pouya Samangouei and Maya Kabkab and Rama Chellappa},
  year          = {2018},
  eprint        = {1805.06605},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  url           = {https://arxiv.org/abs/1805.06605},
}
@misc{deephash,
  title         = {DeepHash: Getting Regularization, Depth and Fine-Tuning Right},
  author        = {Jie Lin and Olivier Morere and Vijay Chandrasekhar and Antoine Veillard and Hanlin Goh},
  year          = {2015},
  eprint        = {1501.04711},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  url           = {https://arxiv.org/abs/1501.04711},
}
% Fixed: empty volume/number fields and single-hyphen page range.
@inproceedings{deep_supervised_hashing_fast_retrieval,
  author    = {Liu, Haomiao and Wang, Ruiping and Shan, Shiguang and Chen, Xilin},
  booktitle = {2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  title     = {Deep Supervised Hashing for Fast Image Retrieval},
  year      = {2016},
  pages     = {2064--2072},
  keywords  = {Binary codes;Training;Semantics;Image retrieval;Quantization (signal);Feature extraction},
  doi       = {10.1109/CVPR.2016.227},
}
% Fixed: mixed "First Last"/"Last, First" name forms normalized to
% the unambiguous comma form; empty fields removed; page hyphen.
@inproceedings{deep_hashing_compact_codes,
  author    = {Liong, Venice Erin and Lu, Jiwen and Wang, Gang and Moulin, Pierre and Zhou, Jie},
  booktitle = {2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  title     = {Deep hashing for compact binary codes learning},
  year      = {2015},
  pages     = {2475--2483},
  keywords  = {Binary codes;DH-HEMTs;Synchronous digital hierarchy;Training;Visualization;Machine learning;Optimization},
  doi       = {10.1109/CVPR.2015.7298862},
}
@misc{deep_semantic_ranking_hashing_multilabel,
  title         = {Deep Semantic Ranking Based Hashing for Multi-Label Image Retrieval},
  author        = {Fang Zhao and Yongzhen Huang and Liang Wang and Tieniu Tan},
  year          = {2015},
  eprint        = {1501.06272},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  url           = {https://arxiv.org/abs/1501.06272},
}
% Fixed: Unicode en-dash in pages; dropped the dx.doi.org resolver
% URL (redundant with the DOI); normalized the DOI casing.
@article{deep_supervised_hashing_large_scale,
  title     = {SSDH: Semi-Supervised Deep Hashing for Large Scale Image Retrieval},
  author    = {Zhang, Jian and Peng, Yuxin},
  journal   = {IEEE Transactions on Circuits and Systems for Video Technology},
  publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
  volume    = {29},
  number    = {1},
  issn      = {1558-2205},
  doi       = {10.1109/TCSVT.2017.2771332},
  year      = {2019},
  month     = jan,
  pages     = {212--225},
}
@misc{waves,
  title         = {WAVES: Benchmarking the Robustness of Image Watermarks},
  author        = {Bang An and Mucong Ding and Tahseen Rabbani and Aakriti Agrawal and Yuancheng Xu and Chenghao Deng and Sicheng Zhu and Abdirisak Mohamed and Yuxin Wen and Tom Goldstein and Furong Huang},
  year          = {2024},
  eprint        = {2401.08573},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
  url           = {https://arxiv.org/abs/2401.08573},
}
% Fixed: booktitle referenced the string macro "mipr", which is not
% defined in this file (it expanded to empty text); spelled out the
% venue so this entry stands on its own.
@inproceedings{marra2019DoGAN,
  author    = {Francesco Marra and Diego Gragnaniello and Luisa Verdoliva and Giovanni Poggi},
  booktitle = {IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)},
  pages     = {506--511},
  title     = {{Do GANs Leave Artificial Fingerprints?}},
  year      = {2019},
}
% Fixed: single-hyphen page range.
@inproceedings{yu2019attributing,
  author    = {Yu, Ning and Davis, Larry and Fritz, Mario},
  title     = {{Attributing Fake Images to GANs: Learning and Analyzing GAN Fingerprints}},
  booktitle = ICCV,
  pages     = {7555--7565},
  year      = {2019},
}
% Fixed: booktitle referenced the undefined string macro "wacv"
% (expanded to empty text); spelled out the venue.
@inproceedings{sinitsa2023deep,
  title     = {Deep Image Fingerprint: Accurate And Low Budget Synthetic Image Detector},
  author    = {Sinitsa, Sergey and Fried, Ohad},
  booktitle = {IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
  year      = {2024},
}
@inproceedings{liu2022detecting,
  title     = {Detecting generated images by real images},
  author    = {Liu, Bo and Yang, Fan and Bi, Xiuli and Xiao, Bin and Li, Weisheng and Gao, Xinbo},
  booktitle = ECCV,
  pages     = {95--110},
  year      = {2022},
}
@article{cozzolino2018forensictransfer,
  title   = {{ForensicTransfer: Weakly-supervised domain adaptation for forgery detection}},
  author  = {Cozzolino, Davide and Thies, Justus and R{\"o}ssler, Andreas and Riess, Christian and Nie{\ss}ner, Matthias and Verdoliva, Luisa},
  journal = {arXiv preprint arXiv:1812.02510v2},
  year    = {2018},
}
% Fixed: booktitle referenced the undefined string macro "wifs"
% (expanded to empty text); spelled out the venue.  Page hyphen.
@inproceedings{marra2019incremental,
  author    = {Marra, Francesco and Saltori, Cristiano and Boato, Giulia and Verdoliva, Luisa},
  booktitle = {IEEE International Workshop on Information Forensics and Security (WIFS)},
  title     = {{Incremental learning for the detection and classification of GAN-generated images}},
  year      = {2019},
  pages     = {1--6},
}
% Fixed: Unicode en-dash in the page range.
@inproceedings{du2020towards,
  author    = {Du, Mengnan and Pentyala, Shiva and Li, Yuening and Hu, Xia},
  title     = {{Towards Generalizable Deepfake Detection with Locality-Aware AutoEncoder}},
  year      = {2020},
  booktitle = {CIKM},
  pages     = {325--334},
  numpages  = {10},
}
% Fixed: booktitle referenced the undefined string macro "icml"
% (expanded to empty text); spelled out the venue.
@inproceedings{jeon2020tgd,
  title     = {{T-GD: Transferable {GAN}-generated Images Detection Framework}},
  author    = {Jeon, Hyeonseong and Bang, Young Oh and Kim, Junyaup and Woo, Simon},
  booktitle = {International Conference on Machine Learning (ICML)},
  pages     = {4746--4761},
  year      = {2020},
  volume    = {119},
}
% Fixed: month given as a quoted word; use the predefined macro.
@inproceedings{durall2020watch,
  author    = {Durall, Ricard and Keuper, Margret and Keuper, Janis},
  title     = {{Watch Your Up-Convolution: CNN Based Generative Deep Neural Networks Are Failing to Reproduce Spectral Distributions}},
  booktitle = cvpr,
  pages     = {7890--7899},
  month     = jun,
  year      = {2020},
}
% Fixed: booktitle referenced the undefined string macro "icml"
% (expanded to empty text); spelled out the venue.
@inproceedings{frank2020leveraging,
  title     = {{Leveraging Frequency Analysis for Deep Fake Image Recognition}},
  author    = {Frank, Joel and Eisenhofer, Thorsten and Sch{\"o}nherr, Lea and Fischer, Asja and Kolossa, Dorothea and Holz, Thorsten},
  booktitle = {International Conference on Machine Learning (ICML)},
  pages     = {3247--3258},
  year      = {2020},
}
@inproceedings{dzanic2020fourier,
  title     = {{Fourier spectrum discrepancies in deep network generated images}},
  author    = {Tarik Dzanic and Karan Shah and Freddie D. Witherden},
  booktitle = nips,
  pages     = {3022--3032},
  year      = {2020},
}
% Fixed: literal "et al." in the author field (use the "and others"
% token); "JongWook" glued; booktitle referenced the undefined
% string macro "icml" (expanded to empty text).
@inproceedings{radford2021learning,
  title     = {Learning Transferable Visual Models From Natural Language Supervision},
  author    = {Alec Radford and Jong Wook Kim and Chris Hallacy and Aditya Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and others},
  booktitle = {International Conference on Machine Learning (ICML)},
  pages     = {8748--8763},
  year      = {2021},
}
@article{amoroso2023parents,
  title   = {{Parents and Children: Distinguishing Multimodal DeepFakes from Natural Images}},
  author  = {Amoroso, Roberto and Morelli, Davide and Cornia, Marcella and Baraldi, Lorenzo and Del Bimbo, Alberto and Cucchiara, Rita},
  journal = {arXiv preprint arXiv:2304.00500v1},
  year    = {2023},
}
@inproceedings{ojha2023towards,
  title     = {Towards universal fake image detectors that generalize across generative models},
  author    = {Ojha, Utkarsh and Li, Yuheng and Lee, Yong Jae},
  booktitle = cvpr,
  pages     = {24480--24489},
  year      = {2023},
}
% Fixed: Unicode en-dash in the page range.
@inproceedings{sha2023fake,
  author    = {Sha, Zeyang and Li, Zheng and Yu, Ning and Zhang, Yang},
  title     = {{DE-FAKE: Detection and Attribution of Fake Images Generated by Text-to-Image Diffusion Models}},
  year      = {2023},
  booktitle = {ACM SIGSAC Conference on Computer and Communications Security},
  pages     = {3418--3432},
  numpages  = {15},
}
@inproceedings{cozzolino2024raising,
  author    = {Davide Cozzolino and Giovanni Poggi and Riccardo Corvi and Matthias Nießner and Luisa Verdoliva},
  title     = {{Raising the Bar of AI-generated Image Detection with CLIP}},
  booktitle = {CVPR Workshops},
  year      = {2024},
}
@article{bammey2023synthbuster,
  title   = {Synthbuster: Towards Detection of Diffusion Model Generated Images},
  author  = {Bammey, Quentin},
  journal = {IEEE OJSP},
  url     = {https://zenodo.org/records/10066460},
  year    = {2023},
}
@misc{jafari2021surveylocalitysensitivehashing,
  title         = {A Survey on Locality Sensitive Hashing Algorithms and their Applications},
  author        = {Omid Jafari and Preeti Maurya and Parth Nagarkar and Khandker Mushfiqul Islam and Chidambaram Crushev},
  year          = {2021},
  eprint        = {2102.08942},
  archivePrefix = {arXiv},
  primaryClass  = {cs.DB},
  url           = {https://arxiv.org/abs/2102.08942},
}
% Fixed: Unicode en-dash in pages; dropped the dx.doi.org resolver
% URL (redundant with the DOI).
@article{locality,
  title     = {DET-LSH: A Locality-Sensitive Hashing Scheme with Dynamic Encoding Tree for Approximate Nearest Neighbor Search},
  author    = {Wei, Jiuqi and Peng, Botao and Lee, Xiaodong and Palpanas, Themis},
  journal   = {Proceedings of the VLDB Endowment},
  publisher = {Association for Computing Machinery (ACM)},
  volume    = {17},
  number    = {9},
  issn      = {2150-8097},
  doi       = {10.14778/3665844.3665854},
  year      = {2024},
  month     = may,
  pages     = {2241--2254},
}
% Fixed: Unicode en-dash in the page range.
@inproceedings{inversion_attack,
  author    = {Fredrikson, Matt and Jha, Somesh and Ristenpart, Thomas},
  title     = {Model Inversion Attacks that Exploit Confidence Information and Basic Countermeasures},
  year      = {2015},
  isbn      = {9781450338325},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/2810103.2813677},
  doi       = {10.1145/2810103.2813677},
  abstract  = {Machine-learning (ML) algorithms are increasingly utilized in privacy-sensitive applications such as predicting lifestyle choices, making medical diagnoses, and facial recognition. In a model inversion attack, recently introduced in a case study of linear classifiers in personalized medicine by Fredrikson et al., adversarial access to an ML model is abused to learn sensitive genomic information about individuals. Whether model inversion attacks apply to settings outside theirs, however, is unknown. We develop a new class of model inversion attack that exploits confidence values revealed along with predictions. Our new attacks are applicable in a variety of settings, and we explore two in depth: decision trees for lifestyle surveys as used on machine-learning-as-a-service systems and neural networks for facial recognition. In both cases confidence values are revealed to those with the ability to make prediction queries to models. We experimentally show attacks that are able to estimate whether a respondent in a lifestyle survey admitted to cheating on their significant other and, in the other context, show how to recover recognizable images of people's faces given only their name and access to the ML model. We also initiate experimental exploration of natural countermeasures, investigating a privacy-aware decision tree training algorithm that is a simple variant of CART learning, as well as revealing only rounded confidence values. The lesson that emerges is that one can avoid these kinds of MI attacks with negligible degradation to utility.},
  booktitle = {Proceedings of the 22nd ACM SIGSAC Conference on Computer and Communications Security},
  pages     = {1322--1333},
  numpages  = {12},
  keywords  = {privacy, machine learning, attacks},
  location  = {Denver, Colorado, USA},
  series    = {CCS '15},
}
@misc{papernot2016distillationdefenseadversarialperturbations,
  title         = {Distillation as a Defense to Adversarial Perturbations against Deep Neural Networks},
  author        = {Nicolas Papernot and Patrick McDaniel and Xi Wu and Somesh Jha and Ananthram Swami},
  year          = {2016},
  eprint        = {1511.04508},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CR},
  url           = {https://arxiv.org/abs/1511.04508},
}
% Fixed: Unicode en-dash in the page range.
@inproceedings{invert,
  author    = {He, Zecheng and Zhang, Tianwei and Lee, Ruby B.},
  title     = {Model inversion attacks against collaborative inference},
  year      = {2019},
  isbn      = {9781450376280},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3359789.3359824},
  doi       = {10.1145/3359789.3359824},
  abstract  = {The prevalence of deep learning has drawn attention to the privacy protection of sensitive data. Various privacy threats have been presented, where an adversary can steal model owners' private data. Meanwhile, countermeasures have also been introduced to achieve privacy-preserving deep learning. However, most studies only focused on data privacy during training, and ignored privacy during inference.In this paper, we devise a new set of attacks to compromise the inference data privacy in collaborative deep learning systems. Specifically, when a deep neural network and the corresponding inference task are split and distributed to different participants, one malicious participant can accurately recover an arbitrary input fed into this system, even if he has no access to other participants' data or computations, or to prediction APIs to query this system. We evaluate our attacks under different settings, models and datasets, to show their effectiveness and generalization. We also study the characteristics of deep learning models that make them susceptible to such inference privacy threats. This provides insights and guidelines to develop more privacy-preserving collaborative systems and algorithms.},
  booktitle = {Proceedings of the 35th Annual Computer Security Applications Conference},
  pages     = {148--162},
  numpages  = {15},
  keywords  = {deep neural network, distributed computation, model inversion attack},
  location  = {San Juan, Puerto Rico, USA},
  series    = {ACSAC '19},
}
@inproceedings{detection1,
title={{Feature Squeezing}: Detecting Adversarial Examples in Deep Neural Networks},
author={Xu, Weilin and Evans, David and Qi, Yanjun},
booktitle={Proceedings 2018 Network and Distributed System Security Symposium},
series={NDSS 2018},
publisher={Internet Society},
year={2018},
doi={10.14722/ndss.2018.23198}
}
@misc{detection2,
title={{MagNet}: a Two-Pronged Defense against Adversarial Examples},
author={Dongyu Meng and Hao Chen},
year={2017},
eprint={1705.09064},
archivePrefix={arXiv},
primaryClass={cs.CR},
url={https://arxiv.org/abs/1705.09064},
}
@incollection{albrecht2021homomorphic,
title={Homomorphic encryption standard},
author={Albrecht, Martin and Chase, Melissa and Chen, Hao and Ding, Jintai and Goldwasser, Shafi and Gorbunov, Sergey and Halevi, Shai and Hoffstein, Jeffrey and Laine, Kim and Lauter, Kristin and others},
booktitle={Protecting Privacy through Homomorphic Encryption},
pages={31--62},
year={2021},
publisher={Springer}
}
@techreport{chase2017security,
title={Security of homomorphic encryption},
author={Chase, Melissa and Chen, Hao and Ding, Jintai and Goldwasser, Shafi and Gorbunov, Sergey and Hoffstein, Jeffrey and Lauter, Kristin and Lokam, Satya and Moody, Dustin and Morrison, Travis and others},
institution={HomomorphicEncryption.org},
address={Redmond, WA, USA},
year={2017}
}
@article{damgard2008homomorphic,
title={Homomorphic encryption and secure comparison},
author={Damg{\aa}rd, Ivan and Geisler, Martin and Kr{\o}igaard, Mikkel},
journal={International Journal of Applied Cryptography},
volume={1},
number={1},
pages={22--31},
year={2008},
publisher={Inderscience Publishers}
}
@article{mouchet2021multiparty,
  author  = {Mouchet, Christian and Troncoso-Pastoriza, Juan and Bossuat, Jean-Philippe and Hubaux, Jean-Pierre},
  title   = {Multiparty homomorphic encryption from ring-learning-with-errors},
  journal = {Proceedings on Privacy Enhancing Technologies},
  volume  = {2021},
  number  = {4},
  pages   = {291--311},
  year    = {2021}
}
@inproceedings{lee2023efficient,
title={Efficient {FHEW} bootstrapping with small evaluation keys, and applications to threshold homomorphic encryption},
author={Lee, Yongwoo and Micciancio, Daniele and Kim, Andrey and Choi, Rakyong and Deryabin, Maxim and Eom, Jieun and Yoo, Donghoon},
booktitle={Annual International Conference on the Theory and Applications of Cryptographic Techniques},
pages={227--256},
year={2023},
organization={Springer}
}
@article{fan2012somewhat,
title={Somewhat practical fully homomorphic encryption},
author={Fan, Junfeng and Vercauteren, Frederik},
journal={Cryptology ePrint Archive},
note={Paper 2012/144},
url={https://eprint.iacr.org/2012/144},
year={2012}
}
@article{brakerski2014leveled,
  author    = {Brakerski, Zvika and Gentry, Craig and Vaikuntanathan, Vinod},
  title     = {(Leveled) fully homomorphic encryption without bootstrapping},
  journal   = {ACM Transactions on Computation Theory (TOCT)},
  volume    = {6},
  number    = {3},
  pages     = {1--36},
  year      = {2014},
  publisher = {ACM New York, NY, USA}
}
@inproceedings{brakerski2012fully,
title={Fully homomorphic encryption without modulus switching from classical {GapSVP}},
author={Brakerski, Zvika},
booktitle={Annual cryptology conference},
pages={868--886},
year={2012},
organization={Springer}
}
@misc{wang2023diffusiondblargescalepromptgallery,
title={{DiffusionDB}: A Large-scale Prompt Gallery Dataset for Text-to-Image Generative Models},
author={Zijie J. Wang and Evan Montoya and David Munechika and Haoyang Yang and Benjamin Hoover and Duen Horng Chau},
year={2023},
eprint={2210.14896},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2210.14896},
}
@mastersthesis{Zauner2010ImplementationAB,
title={Implementation and Benchmarking of Perceptual Image Hash Functions},
author={Zauner, Christoph},
school={Upper Austria University of Applied Sciences, Hagenberg Campus},
year={2010},
url={https://api.semanticscholar.org/CorpusID:17075066}
}
@inproceedings{imagenet,
author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},
title={{ImageNet}: A large-scale hierarchical image database},
year={2009},
pages={248--255},
keywords={Large-scale systems;Image databases;Explosions;Internet;Robustness;Information retrieval;Image retrieval;Multimedia databases;Ontologies;Spine},
doi={10.1109/CVPR.2009.5206848}}
@article{alabdulmohsin2024getting,
title={Getting {ViT} in shape: Scaling laws for compute-optimal model design},
author={Alabdulmohsin, Ibrahim M and Zhai, Xiaohua and Kolesnikov, Alexander and Beyer, Lucas},
journal={Advances in Neural Information Processing Systems},
volume={36},
year={2024}
}
@inproceedings{zhai2023sigmoid,
  author    = {Zhai, Xiaohua and Mustafa, Basil and Kolesnikov, Alexander and Beyer, Lucas},
  title     = {Sigmoid loss for language image pre-training},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages     = {11975--11986},
  year      = {2023}
}
@inproceedings{dosovitskiy2020vit,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
booktitle={International Conference on Learning Representations},
year={2021}
}
@inproceedings{wortsman2022robust,
  author    = {Wortsman, Mitchell and Ilharco, Gabriel and Kim, Jong Wook and Li, Mike and Kornblith, Simon and Roelofs, Rebecca and Lopes, Raphael Gontijo and Hajishirzi, Hannaneh and Farhadi, Ali and Namkoong, Hongseok and others},
  title     = {Robust fine-tuning of zero-shot models},
  booktitle = {Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
  pages     = {7959--7971},
  year      = {2022}
}
@misc{dalle3,
author = {Betker, James and Goh, Gabriel and Jing, Li and Brooks, Tim and Wang, Jianfeng and Li, Linjie and Ouyang, Long and Zhuang, Juntang and Lee, Joyce and Guo, Yufei and Manassra, Wesam and Dhariwal, Prafulla and Chu, Casey and Jiao, Yunxin},
title = {Improving Image Generation with Better Captions},
howpublished = {\url{https://openai.com/dall-e-3}},
year = {2023}
}
@misc{dalle2,
title = {Hierarchical Text-Conditional Image Generation with {CLIP} Latents},
author = {Ramesh, Aditya and Dhariwal, Prafulla and Nichol, Alex and Chu, Casey and Chen, Mark},
year = {2022},
eprint = {2204.06125},
archivePrefix = {arXiv},
primaryClass = {cs.CV},
howpublished = {\url{https://openai.com/dall-e-2}}
}
@misc{firefly,
author = {{Adobe}},
title = {Adobe Firefly},
howpublished = {\url{https://www.adobe.com/sensei/generative-ai/firefly.html}},
year = {2023}
}
@misc{midjourney,
author = {{Midjourney}},
title = {Midjourney},
howpublished = {\url{https://www.midjourney.com/home}},
year = {2023}
}
@inproceedings{dang2015raise,
title={{RAISE}: A raw images dataset for digital image forensics},
author={Dang-Nguyen, Duc-Tien and Pasquini, Cecilia and Conotter, Valentina and Boato, Giulia},
booktitle={Proceedings of the 6th ACM multimedia systems conference},
pages={219--224},
year={2015}
}
@inproceedings{lin2014microsoft,
title={Microsoft {COCO}: Common objects in context},
author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
booktitle={Computer Vision--ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13},
pages={740--755},
year={2014},
organization={Springer}
}
@inproceedings{li2022blip,
title={{BLIP}: Bootstrapping language-image pre-training for unified vision-language understanding and generation},
author={Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
booktitle={International conference on machine learning},
pages={12888--12900},
year={2022},
organization={PMLR}
}
@article{podell2023sdxl,
title={{SDXL}: Improving latent diffusion models for high-resolution image synthesis},
author={Podell, Dustin and English, Zion and Lacey, Kyle and Blattmann, Andreas and Dockhorn, Tim and M{\"u}ller, Jonas and Penna, Joe and Rombach, Robin},
journal={arXiv preprint arXiv:2307.01952},
year={2023}
}
@inproceedings{corvi2023detection,
title={On the detection of synthetic images generated by diffusion models},
author={Corvi, Riccardo and Cozzolino, Davide and Zingarini, Giada and Poggi, Giovanni and Nagano, Koki and Verdoliva, Luisa},
booktitle={ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages={1--5},
year={2023},
organization={IEEE}
}
@article{fang2023data,
  author  = {Fang, Alex and Jose, Albin Madappally and Jain, Amit and Schmidt, Ludwig and Toshev, Alexander and Shankar, Vaishaal},
  title   = {Data filtering networks},
  journal = {arXiv preprint arXiv:2309.17425},
  year    = {2023}
}
@misc{bhatia2022exploitingdefendingapproximatelinearity,
title={Exploiting and Defending Against the Approximate Linearity of {Apple}'s {NeuralHash}},
author={Jagdeep Singh Bhatia and Kevin Meng},
year={2022},
eprint={2207.14258},
archivePrefix={arXiv},
primaryClass={cs.CR},
url={https://arxiv.org/abs/2207.14258},
}
@inproceedings{feature,
title={{Feature Squeezing}: Detecting Adversarial Examples in Deep Neural Networks},
author={Xu, Weilin and Evans, David and Qi, Yanjun},
booktitle={Proceedings 2018 Network and Distributed System Security Symposium},
series={NDSS 2018},
publisher={Internet Society},
year={2018},
doi={10.14722/ndss.2018.23198}
}
@misc{PhantomZone,
author = {{Gausslabs}},
title = {phantom-zone},
howpublished = {\url{https://github.com/gausslabs/phantom-zone}},
note = {Accessed: 2024-10-26}
}
@article{li2024panther,
title={{PANTHER}: Private Approximate Nearest Neighbor Search in the Single Server Setting},
author={Li, Jingyu and Huang, Zhicong and Zhang, Min and Liu, Jian and Hong, Cheng and Wei, Tao and Chen, Wenguang},
journal={Cryptology ePrint Archive},
year={2024}
}
@inproceedings{nguyen2024laa,
title={{LAA-Net}: Localized artifact attention network for quality-agnostic and generalizable deepfake detection},
author={Nguyen, Dat and Mejri, Nesryne and Singh, Inder Pal and Kuleshova, Polina and Astrid, Marcella and Kacem, Anis and Ghorbel, Enjie and Aouada, Djamila},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={17395--17405},
year={2024}
}
@article{chen2024single,
title={A single simple patch is all you need for {AI}-generated image detection},
author={Chen, Jiaxuan and Yao, Jieteng and Niu, Li},
journal={arXiv preprint arXiv:2402.01123},
year={2024}
}
@inproceedings{wang2020cnn,
title={{CNN}-generated images are surprisingly easy to spot... for now},
author={Wang, Sheng-Yu and Wang, Oliver and Zhang, Richard and Owens, Andrew and Efros, Alexei A},
booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
pages={8695--8704},
year={2020}
}
@inproceedings{gragnaniello2021gan,
title={Are {GAN} generated images easy to detect? {A} critical analysis of the state-of-the-art},
author={Gragnaniello, Diego and Cozzolino, Davide and Marra, Francesco and Poggi, Giovanni and Verdoliva, Luisa},
booktitle={2021 IEEE international conference on multimedia and expo (ICME)},
pages={1--6},
year={2021},
organization={IEEE}
}
@inproceedings{mandelli2022detecting,
title={Detecting {GAN}-generated images by orthogonal training of multiple {CNNs}},
author={Mandelli, Sara and Bonettini, Nicol{\`o} and Bestagini, Paolo and Tubaro, Stefano},
booktitle={2022 IEEE International Conference on Image Processing (ICIP)},
pages={3091--3095},
year={2022},
organization={IEEE}
}
@article{subtle_adversarial,
author = {Veerabadran, Vijay and Goldman, Josh and Shankar, Shreya and Cheung, Brian and Papernot, Nicolas and Kurakin, Alexey and Goodfellow, Ian and Shlens, Jonathon and Sohl-Dickstein, Jascha and Mozer, Michael C. and Elsayed, Gamaleldin F.},
title = {Subtle adversarial image manipulations influence both human and machine perception},
journal = {Nature Communications},
volume = {14},
number = {1},
pages = {4933},
year = {2023},
month = aug,
issn = {2041-1723},
doi = {10.1038/s41467-023-40499-0},
abstract = {Although artificial neural networks (ANNs) were inspired by the brain, ANNs exhibit a brittleness not generally observed in human perception. One shortcoming of ANNs is their susceptibility to adversarial perturbations—subtle modulations of natural images that result in changes to classification decisions, such as confidently mislabelling an image of an elephant, initially classified correctly, as a clock. In contrast, a human observer might well dismiss the perturbations as an innocuous imaging artifact. This phenomenon may point to a fundamental difference between human and machine perception, but it drives one to ask whether human sensitivity to adversarial perturbations might be revealed with appropriate behavioral measures. Here, we find that adversarial perturbations that fool ANNs similarly bias human choice. We further show that the effect is more likely driven by higher-order statistics of natural images to which both humans and ANNs are sensitive, rather than by the detailed architecture of the ANN.}
}
@inproceedings{chai2020makes,
title={What makes fake images detectable? understanding properties that generalize},
author={Chai, Lucy and Bau, David and Lim, Ser-Nam and Isola, Phillip},
booktitle={Computer vision--ECCV 2020: 16th European conference, Glasgow, UK, August 23--28, 2020, proceedings, part XXVI 16},
pages={103--120},