@article{1400,
  title = {Give more data, awareness and control to individual citizens, and they will help COVID-19 containment},
  journal = {Ethics and Information Technology},
  year = {2021},
  month = {2021/02/02},
  abstract = {The rapid dynamics of COVID-19 call for quick and effective tracking of virus transmission chains and early detection of outbreaks, especially in {\textquotedblleft}phase 2{\textquotedblright} of the pandemic, when lockdown and other restriction measures are progressively withdrawn, in order to avoid or minimize contagion resurgence. For this purpose, contact-tracing apps are being proposed for large-scale adoption by many countries. A centralized approach, where all data sensed by the app are sent to a nation-wide server, raises concerns about citizens{\textquoteright} privacy and needlessly strong digital surveillance, thus alerting us to the need to minimize personal data collection and to avoid location tracking. We advocate the conceptual advantage of a decentralized approach, where both contact and location data are collected exclusively in individual citizens{\textquoteright} {\textquotedblleft}personal data stores{\textquotedblright}, to be shared separately and selectively (e.g., with a backend system, but possibly also with other citizens), voluntarily, only when the citizen has tested positive for COVID-19, and with a privacy-preserving level of granularity. This approach better protects the personal sphere of citizens and affords multiple benefits: it allows detailed information to be gathered about infected people in a privacy-preserving fashion, which in turn enables both contact tracing and the early detection of outbreak hotspots on a finer geographic scale. The decentralized approach is also scalable to large populations, in that only the data of positive patients need be handled at a central level. Our recommendation is two-fold. First, to extend existing decentralized architectures with a light touch, in order to manage the collection of location data locally on the device and to allow the user to share spatio-temporal aggregates{\textemdash}if and when they want, and for specific aims{\textemdash}with health authorities, for instance. Second, to pursue, over the longer term, the realization of a Personal Data Store vision, giving users the opportunity to contribute to the collective good to the extent they wish, enhancing self-awareness, and cultivating collective efforts for rebuilding society.},
  issn = {1572-8439},
  doi = {10.1007/s10676-020-09572-w},
  url = {https://link.springer.com/article/10.1007/s10676-020-09572-w},
  author = {Nanni, Mirco and Andrienko, Gennady and Barab{\'a}si, Albert-L{\'a}szl{\'o} and Boldrini, Chiara and Bonchi, Francesco and Cattuto, Ciro and Chiaromonte, Francesca and Comand{\'e}, Giovanni and Conti, Marco and Cot{\'e}, Mark and Dignum, Frank and Dignum, Virginia and Domingo-Ferrer, Josep and Ferragina, Paolo and Giannotti, Fosca and Guidotti, Riccardo and Helbing, Dirk and Kaski, Kimmo and Kert{\'e}sz, J{\'a}nos and Lehmann, Sune and Lepri, Bruno and Lukowicz, Paul and Matwin, Stan and Meg{\'\i}as Jim{\'e}nez, David and Monreale, Anna and Morik, Katharina and Oliver, Nuria and Passarella, Andrea and Passerini, Andrea and Pedreschi, Dino and Pentland, Alex and Pianesi, Fabio and Pratesi, Francesca and Rinzivillo, Salvatore and Ruggieri, Salvatore and Siebes, Arno and Torra, Vicen{\c c} and Trasarti, Roberto and van den Hoven, Jeroen and Vespignani, Alessandro}
}

@article{759,
  title = {Discrimination- and privacy-aware patterns},
  journal = {Data Mining and Knowledge Discovery},
  volume = {29},
  number = {6},
  year = {2015},
  pages = {1733{\textendash}1782},
  abstract = {Data mining is gaining societal momentum due to the ever-increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. We are therefore faced with unprecedented opportunities and risks: a deeper understanding of human behavior and how our society works is darkened by a greater chance of privacy intrusion and unfair discrimination based on the extracted patterns and profiles. Consider the case in which a set of patterns extracted from the personal data of a population of individuals is released for subsequent use in a decision-making process, such as granting or denying credit. First, the set of patterns may reveal sensitive information about individual persons in the training population and, second, decision rules based on such patterns may lead to unfair discrimination, depending on what is represented in the training cases. Although methods independently addressing privacy or discrimination in data mining have been proposed in the literature, in this context we argue that privacy and discrimination risks should be tackled together, and we present a methodology for doing so while publishing frequent pattern mining results. We describe a set of pattern sanitization methods, one for each discrimination measure used in the legal literature, to achieve a fair publishing of frequent patterns in combination with two possible privacy transformations: one based on k-anonymity and one based on differential privacy. Our proposed pattern sanitization methods based on k-anonymity yield both privacy- and discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion. Moreover, they obtain a better trade-off between protection and data quality than the sanitization methods based on differential privacy. Finally, the effectiveness of our proposals is assessed by extensive experiments.},
  doi = {10.1007/s10618-014-0393-7},
  url = {http://dx.doi.org/10.1007/s10618-014-0393-7},
  author = {Hajian, Sara and Domingo-Ferrer, Josep and Monreale, Anna and Pedreschi, Dino and Giannotti, Fosca}
}

@conference{566,
  title = {Fair pattern discovery},
  booktitle = {Symposium on Applied Computing, {SAC} 2014, Gyeongju, Republic of Korea, March 24{\textendash}28, 2014},
  year = {2014},
  pages = {113{\textendash}120},
  abstract = {Data mining is gaining societal momentum due to the ever-increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. We are witnessing unprecedented opportunities to understand human and societal behavior, which are unfortunately darkened by several risks to human rights: one of these is unfair discrimination based on the extracted patterns and profiles. Consider the case in which a set of patterns extracted from the personal data of a population of individuals is released for subsequent use in a decision-making process, such as granting or denying credit. Decision rules based on such patterns may lead to unfair discrimination, depending on what is represented in the training cases. In this context, we address the discrimination risks resulting from publishing frequent patterns. We present a set of pattern sanitization methods, one for each discrimination measure used in the legal literature, for fair (discrimination-protected) publishing of frequent pattern mining results. Our proposed pattern sanitization methods yield discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion. Finally, the effectiveness of our proposals is assessed by extensive experiments.},
  doi = {10.1145/2554850.2555043},
  url = {http://doi.acm.org/10.1145/2554850.2555043},
  author = {Hajian, Sara and Monreale, Anna and Pedreschi, Dino and Domingo-Ferrer, Josep and Giannotti, Fosca}
}
@conference{569,
  title = {Injecting Discrimination and Privacy Awareness Into Pattern Discovery},
  booktitle = {12th {IEEE} International Conference on Data Mining Workshops, {ICDM} Workshops, Brussels, Belgium, December 10, 2012},
  year = {2012},
  pages = {360{\textendash}369},
  abstract = {Data mining is gaining societal momentum due to the ever-increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. Data mining comes with unprecedented opportunities and risks: a deeper understanding of human behavior and how our society works is darkened by a greater chance of privacy intrusion and unfair discrimination based on the extracted patterns and profiles. Although methods independently addressing privacy or discrimination in data mining have been proposed in the literature, in this context we argue that privacy and discrimination risks should be tackled together, and we present a methodology for doing so while publishing frequent pattern mining results. We describe a combined pattern sanitization framework that yields both privacy- and discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion.},
  doi = {10.1109/ICDMW.2012.51},
  url = {http://dx.doi.org/10.1109/ICDMW.2012.51},
  author = {Hajian, Sara and Monreale, Anna and Pedreschi, Dino and Domingo-Ferrer, Josep and Giannotti, Fosca}
}