; ---------------------------------------------------------------------
; To submit, log into grace.umd.edu and use the following command:
; /submit 2017 fall ENEE 657 0101 28 adversarial_samples.bib
; ---------------------------------------------------------------------

; Required Readings

; NOTE(review): entry type @ARTICLE and venue-in-`journal` are kept as-is —
; this file follows the course submission template, and the grading tool
; may rely on that layout. Both papers are conference publications.

@ARTICLE{ Carlini2016,
  % Title cleaned: original contained PDF-extraction residue
  % ("...This paper is included in the Proceedings of the...").
  title   = {{Hidden Voice Commands}},
  % Author list deduplicated: "Shields, Clay and Wagner, David" appeared twice.
  author  = {Carlini, Nicholas and Mishra, Pratyush and Vaidya, Tavish and
             Zhang, Yuankai and Sherr, Micah and Shields, Clay and
             Wagner, David},
  journal = {USENIX Security Symposium},
  year    = {2016},
  isbn    = {9781931971324},
  studentfirstname ={},
  studentlastname ={},
  summary = {},
  contribution1 ={},
  contribution2 ={},
  contribution3 ={},
  contribution4 ={},
  contribution5 ={},
  weakness1 = {},
  weakness2 = {},
  weakness3 = {},
  weakness4 = {},
  weakness5 = {},
  interesting = {high/med/low},
  opinions = {},
}

@ARTICLE{ Sharif2016,
  % Full title restored (subtitle was truncated); matches DOI
  % 10.1145/2976749.2978392.
  title    = {{Accessorize to a Crime: Real and Stealthy Attacks on
              State-of-the-Art Face Recognition}},
  author   = {Sharif, Mahmood and Bhagavatula, Sruti and Bauer, Lujo and
              Reiter, Michael K.},
  journal  = {Proceedings of the 2016 ACM SIGSAC Conference on Computer and
              Communications Security - CCS'16},
  url      = {http://dl.acm.org/citation.cfm?doid=2976749.2978392},
  issn     = {15437221},
  % Abstract cleaned of PDF line-break hyphenation artifacts
  % ("de- fine", "at- tacker", etc.) and the joined word "amyriad".
  abstract = {Machine learning is enabling a myriad innovations, including
              new algorithms for cancer diagnosis and self-driving cars. The
              broad use of machine learning makes it important to understand
              the extent to which machine-learning algorithms are subject to
              attack, particularly when used in applications where physical
              security or safety is at risk. In this paper, we focus on
              facial biometric systems, which are widely used in surveillance
              and access control. We define and investigate a novel class of
              attacks: attacks that are physically realizable and
              inconspicuous, and allow an attacker to evade recognition or
              impersonate another individual. We develop a systematic method
              to automatically generate such attacks, which are realized
              through printing a pair of eyeglass frames. When worn by the
              attacker whose image is supplied to a state-of-the-art
              face-recognition algorithm, the eyeglasses allow her to evade
              being recognized or to impersonate another individual. Our
              investigation focuses on white-box face-recognition systems,
              but we also demonstrate how similar techniques can be used in
              black-box scenarios, as well as to avoid face detection.},
  year     = {2016},
  isbn     = {9781450341394},
  doi      = {10.1145/2976749.2978392},
  pages    = {1528--1540},
  studentfirstname ={},
  studentlastname ={},
  summary = {},
  contribution1 ={},
  contribution2 ={},
  contribution3 ={},
  contribution4 ={},
  contribution5 ={},
  weakness1 = {},
  weakness2 = {},
  weakness3 = {},
  weakness4 = {},
  weakness5 = {},
  interesting = {high/med/low},
  opinions = {},
}

; BibTex cross-references (don't add anything here)