; ---------------------------------------------------------------------
; To submit, log into grace.umd.edu and use the following command:
; /submit 2017 fall ENEE 657 0101 26 evasion_attacks.bib
; ---------------------------------------------------------------------
; Required Readings

@ARTICLE{Barreno2010,
  title = {{The security of machine learning}},
  author = {Barreno, Marco and Nelson, Blaine and Joseph, Anthony D. and Tygar, J. D.},
  journal = {Machine Learning},
  issn = {0885-6125},
  year = {2010},
  abstract = {Machine learning's ability to rapidly evolve to changing and complex situations has helped it become a fundamental tool for computer security. That adaptability is also a vulnerability: attackers can exploit machine learning systems. We present a taxonomy identifying and analyzing attacks against machine learning systems. We show how these classes influence the costs for the attacker and defender, and we give a formal structure defining their interaction. We use our framework to survey and analyze the literature of attacks against machine learning systems. We also illustrate our taxonomy by showing how it can guide attacks against SpamBayes, a popular statistical spam filter. Finally, we discuss how our taxonomy suggests new lines of defenses.},
  pages = {121--148},
  keywords = {Adversarial environments, Adversarial learning, Security},
  doi = {10.1007/s10994-010-5188-5},
  volume = {81},
  number = {2},
  isbn = {1573-0565},
  studentfirstname = {},
  studentlastname = {},
  summary = {},
  contribution1 = {},
  contribution2 = {},
  contribution3 = {},
  contribution4 = {},
  contribution5 = {},
  weakness1 = {},
  weakness2 = {},
  weakness3 = {},
  weakness4 = {},
  weakness5 = {},
  interesting = {high/med/low},
  opinions = {},
}

@INPROCEEDINGS{Carlini2017,
  title = {{Towards Evaluating the Robustness of Neural Networks}},
  author = {Carlini, Nicholas and Wagner, David},
  booktitle = {Proceedings - IEEE Symposium on Security and Privacy},
  isbn = {9781509055326},
  year = {2017},
  issn = {1081-6011},
  doi = {10.1109/SP.2017.49},
  pages = {39--57},
  archiveprefix = {arXiv},
  arxivid = {1608.04644},
  eprint = {1608.04644},
  abstract = {Neural networks provide state-of-the-art results for most machine learning tasks. Unfortunately, neural networks are vulnerable to adversarial examples: given an input $x$ and any target classification $t$, it is possible to find a new input $x'$ that is similar to $x$ but classified as $t$. This makes it difficult to apply neural networks in security-critical areas. Defensive distillation is a recently proposed approach that can take an arbitrary neural network, and increase its robustness, reducing the success rate of current attacks' ability to find adversarial examples from 95\% to 0.5\%. In this paper, we demonstrate that defensive distillation does not significantly increase the robustness of neural networks by introducing three new attack algorithms that are successful on both distilled and undistilled neural networks with 100\% probability. Our attacks are tailored to three distance metrics used previously in the literature, and when compared to previous adversarial example generation algorithms, our attacks are often much more effective (and never worse). Furthermore, we propose using high-confidence adversarial examples in a simple transferability test we show can also be used to break defensive distillation. We hope our attacks will be used as a benchmark in future defense attempts to create neural networks that resist adversarial examples.},
  studentfirstname = {},
  studentlastname = {},
  summary = {},
  contribution1 = {},
  contribution2 = {},
  contribution3 = {},
  contribution4 = {},
  contribution5 = {},
  weakness1 = {},
  weakness2 = {},
  weakness3 = {},
  weakness4 = {},
  weakness5 = {},
  interesting = {high/med/low},
  opinions = {},
}
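; ---------------------------------------------------------------------
; For orientation while reading Carlini2017: the search for a targeted
; adversarial example described in the abstract above can be written,
; using its x, t, and x' and setting x' = x + \delta (the \delta and p
; notation is added here for illustration, not quoted from the paper),
; roughly as the optimization problem
;
;   \min_{\delta} \; \| \delta \|_{p}
;   \quad \text{subject to} \quad C(x + \delta) = t, \; x + \delta \in [0, 1]^{n}
;
; where C is the classifier under attack and p \in \{0, 2, \infty\}
; corresponds to the three distance metrics the abstract mentions. The
; paper's attacks replace the hard constraint with a differentiable
; surrogate loss and minimize it by gradient descent.
; ---------------------------------------------------------------------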
; BibTeX cross-references (don't add anything here)